From c53bef933acfd2187d7f53b6858def40aea0413c Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Sat, 4 Oct 2025 02:54:28 +0200
Subject: [PATCH] Restructure workflow processing into mode-based architecture (ReAct + action-plan)
---
modules/services/serviceAi/mainServiceAi.py | 68 +-
modules/workflows/methods/methodAi.py | 280 ++-
.../workflows/processing/adaptive/__init__.py | 9 +
.../processing/adaptive/contentValidator.py | 308 +++
.../processing/adaptive/intentAnalyzer.py | 239 ++
.../processing/adaptive/learningEngine.py | 166 ++
.../processing/adaptive/progressTracker.py | 142 ++
modules/workflows/processing/core/__init__.py | 1 +
.../processing/core/actionExecutor.py | 258 ++
.../processing/core/messageCreator.py | 361 +++
.../workflows/processing/core/taskPlanner.py | 311 +++
.../workflows/processing/core/validator.py | 104 +
modules/workflows/processing/handlingTasks.py | 2191 -----------------
.../workflows/processing/modes/__init__.py | 1 +
.../processing/modes/actionplanMode.py | 831 +++++++
.../workflows/processing/modes/baseMode.py | 60 +
.../workflows/processing/modes/reactMode.py | 907 +++++++
modules/workflows/processing/promptFactory.py | 1098 ---------
.../workflows/processing/shared/__init__.py | 1 +
.../processing/{ => shared}/executionState.py | 0
.../processing/shared/promptFactory.py | 321 +++
.../{ => shared}/promptFactoryPlaceholders.py | 209 +-
.../workflows/processing/workflowProcessor.py | 335 +++
modules/workflows/workflowManager.py | 20 +-
.../ai_result_r0t0a0.txt | 3 -
.../method_ai_20251003-200852/raw_result.txt | 11 -
.../ai_result_r0t0a0.txt | 3 -
.../method_ai_20251003-200904/raw_result.txt | 11 -
.../ai_result_r0t0a0.txt | 49 +
.../method_ai_20251004-000320/raw_result.txt | 12 +
.../summary.txt | 0
.../ai_result_r0t0a0.txt | 10 +
.../method_ai_20251004-000335/raw_result.txt | 53 +
.../summary.txt | 0
.../ai_result_r0t0a0.txt | 32 +
.../method_ai_20251004-001456/raw_result.txt | 12 +
.../summary.txt | 0
.../ai_result_r0t0a0.txt | 1 +
.../method_ai_20251004-001507/raw_result.txt | 12 +
.../method_ai_20251004-001507/summary.txt | 2 +
.../ai_result_r0t0a0.txt | 27 +
.../method_ai_20251004-001800/raw_result.txt | 12 +
.../method_ai_20251004-001800/summary.txt | 2 +
.../ai_result_r0t0a0.txt | 24 +
.../method_ai_20251004-001810/raw_result.txt | 12 +
.../method_ai_20251004-001810/summary.txt | 2 +
.../ai_result_r0t0a0.txt | 29 +-
.../raw_result.txt | 29 +-
.../method_ai_20251004-004539/summary.txt | 2 +
.../ai_result_r0t0a0.txt | 7 +
.../method_ai_20251004-004746/raw_result.txt | 7 +
.../method_ai_20251004-004746/summary.txt | 2 +
.../m20251003-220757_1_1_0/message_text.txt | 3 -
.../obj/m20251003-220841_1_1_1/message.json | 19 -
.../m20251003-220841_1_1_1/message_text.txt | 4 -
.../document_001_metadata.json | 12 -
.../obj/m20251003-220843_1_1_1/message.json | 19 -
.../m20251003-220843_1_1_1/message_text.txt | 1 -
.../m20251003-220843_1_2_0/message_text.txt | 3 -
.../document_001_metadata.json | 12 -
.../obj/m20251003-220854_1_2_1/message.json | 19 -
.../m20251003-220854_1_2_1/message_text.txt | 1 -
.../obj/m20251003-220904_1_2_2/message.json | 19 -
.../document_001_metadata.json | 12 -
.../obj/m20251003-220906_1_2_2/message.json | 19 -
.../m20251003-220906_1_2_2/message_text.txt | 1 -
.../m20251003-220907_1_0_0/message_text.txt | 4 -
.../message.json | 6 +-
.../message_text.txt | 0
.../message.json | 8 +-
.../m20251004-015326_1_1_0/message_text.txt | 3 +
.../obj/m20251004-015331_1_1_1/message.json | 19 +
.../m20251004-015331_1_1_1/message_text.txt | 3 +
.../obj/m20251004-015335_1_1_0/message.json | 19 +
.../m20251004-015335_1_1_0/message_text.txt | 6 +
.../obj/m20251004-015335_1_2_0/message.json | 19 +
.../m20251004-015335_1_2_0/message_text.txt | 3 +
.../obj/m20251004-015339_1_2_1/message.json | 19 +
.../m20251004-015339_1_2_1/message_text.txt | 3 +
.../obj/m20251004-015342_1_0_0/message.json | 19 +
.../m20251004-015342_1_0_0/message_text.txt | 4 +
.../obj/m20251004-015342_1_2_0/message.json | 19 +
.../m20251004-015342_1_2_0/message_text.txt | 6 +
.../obj/m20251004-020301_1_0_0/message.json | 19 +
.../m20251004-020301_1_0_0/message_text.txt | 1 +
.../obj/m20251004-020306_1_1_0/message.json | 19 +
.../m20251004-020306_1_1_0/message_text.txt | 3 +
.../obj/m20251004-020311_1_1_1/message.json | 19 +
.../m20251004-020311_1_1_1/message_text.txt | 3 +
.../obj/m20251004-020321_1_1_1/message.json | 19 +
.../m20251004-020321_1_1_1/message_text.txt | 4 +
.../document_001_metadata.json | 12 +
.../obj/m20251004-020324_1_1_0/message.json | 19 +
.../m20251004-020324_1_1_0/message_text.txt | 7 +
.../obj/m20251004-020324_1_2_0/message.json | 19 +
.../m20251004-020324_1_2_0/message_text.txt | 3 +
.../obj/m20251004-020329_1_2_1/message.json | 19 +
.../m20251004-020329_1_2_1/message_text.txt | 3 +
.../obj/m20251004-020335_1_1_1/message.json | 19 +
.../m20251004-020335_1_1_1/message_text.txt | 4 +
.../document_001_metadata.json | 12 +
.../obj/m20251004-020335_1_2_2/message.json | 19 +
.../m20251004-020335_1_2_2/message_text.txt | 3 +
.../obj/m20251004-020336_1_1_2/message.json | 19 +
.../m20251004-020336_1_1_2/message_text.txt | 6 +
.../obj/m20251004-020338_1_0_0/message.json | 19 +
.../m20251004-020338_1_0_0/message_text.txt | 4 +
.../obj/m20251004-020338_1_2_0/message.json | 19 +
.../m20251004-020338_1_2_0/message_text.txt | 6 +
.../obj/m20251004-021441_1_0_0/message.json | 19 +
.../m20251004-021441_1_0_0/message_text.txt | 1 +
.../obj/m20251004-021446_1_1_0/message.json | 19 +
.../m20251004-021446_1_1_0/message_text.txt | 8 +
.../obj/m20251004-021447_1_1_0/message.json | 19 +
.../m20251004-021447_1_1_0/message_text.txt | 3 +
.../obj/m20251004-021451_1_1_1/message.json | 19 +
.../m20251004-021451_1_1_1/message_text.txt | 3 +
.../obj/m20251004-021457_1_1_1/message.json | 19 +
.../m20251004-021457_1_1_1/message_text.txt | 4 +
.../document_001_metadata.json | 12 +
.../obj/m20251004-021500_1_1_0/message.json | 19 +
.../m20251004-021500_1_1_0/message_text.txt | 8 +
.../obj/m20251004-021500_1_2_0/message.json | 19 +
.../m20251004-021500_1_2_0/message_text.txt | 3 +
.../obj/m20251004-021504_1_2_1/message.json | 19 +
.../m20251004-021504_1_2_1/message_text.txt | 3 +
.../obj/m20251004-021507_1_1_1/message.json | 19 +
.../message_text.txt | 0
.../document_001_metadata.json | 12 +
.../message.json | 8 +-
.../m20251004-021511_1_0_0/message_text.txt | 4 +
.../obj/m20251004-021511_1_2_0/message.json | 19 +
.../m20251004-021511_1_2_0/message_text.txt | 6 +
.../obj/m20251004-021746_1_0_0/message.json | 19 +
.../m20251004-021746_1_0_0/message_text.txt | 1 +
.../obj/m20251004-021751_1_1_0/message.json | 19 +
.../m20251004-021751_1_1_0/message_text.txt | 3 +
.../obj/m20251004-021801_1_1_0/message.json | 19 +
.../m20251004-021801_1_1_0/message_text.txt | 4 +
.../obj/m20251004-021801_1_1_1/message.json | 19 +
.../m20251004-021801_1_1_1/message_text.txt | 4 +
.../document_001_metadata.json | 12 +
.../obj/m20251004-021801_1_2_0/message.json | 19 +
.../m20251004-021801_1_2_0/message_text.txt | 3 +
.../obj/m20251004-021811_1_0_0/message.json | 19 +
.../m20251004-021811_1_0_0/message_text.txt | 4 +
.../obj/m20251004-021811_1_2_0/message.json | 19 +
.../m20251004-021811_1_2_0/message_text.txt | 4 +
.../message.json | 8 +-
.../message_text.txt | 2 +-
.../document_001_metadata.json | 12 +
.../obj/m20251004-022428_1_0_0/message.json | 19 +
.../m20251004-022428_1_0_0/message_text.txt | 1 +
.../obj/m20251004-022433_1_1_0/message.json | 19 +
.../m20251004-022433_1_1_0/message_text.txt | 3 +
.../obj/m20251004-022437_1_1_0/message.json | 19 +
.../m20251004-022437_1_1_0/message_text.txt | 4 +
.../message.json | 10 +-
.../m20251004-022437_1_2_0/message_text.txt | 3 +
.../obj/m20251004-022440_1_0_0/message.json | 19 +
.../m20251004-022440_1_0_0/message_text.txt | 4 +
.../obj/m20251004-022440_1_2_0/message.json | 19 +
.../m20251004-022440_1_2_0/message_text.txt | 4 +
.../obj/m20251004-023003_1_0_0/message.json | 19 +
.../m20251004-023003_1_0_0/message_text.txt | 1 +
.../obj/m20251004-023008_1_1_0/message.json | 19 +
.../m20251004-023008_1_1_0/message_text.txt | 3 +
.../obj/m20251004-023011_1_1_0/message.json | 19 +
.../m20251004-023011_1_1_0/message_text.txt | 4 +
.../obj/m20251004-023011_1_1_1/message.json | 19 +
.../m20251004-023011_1_1_1/message_text.txt | 6 +
.../obj/m20251004-023012_1_2_0/message.json | 19 +
.../m20251004-023012_1_2_0/message_text.txt | 3 +
.../obj/m20251004-023015_1_2_0/message.json | 19 +
.../m20251004-023015_1_2_0/message_text.txt | 4 +
.../obj/m20251004-023015_1_2_1/message.json | 19 +
.../m20251004-023015_1_2_1/message_text.txt | 6 +
.../obj/m20251004-023016_1_0_0/message.json | 19 +
.../m20251004-023016_1_0_0/message_text.txt | 4 +
.../obj/m20251004-023238_1_0_0/message.json | 19 +
.../m20251004-023238_1_0_0/message_text.txt | 1 +
.../obj/m20251004-023243_1_1_0/message.json | 19 +
.../m20251004-023243_1_1_0/message_text.txt | 3 +
.../obj/m20251004-023246_1_1_1/message.json | 19 +
.../m20251004-023246_1_1_1/message_text.txt | 6 +
.../obj/m20251004-023247_1_1_0/message.json | 19 +
.../m20251004-023247_1_1_0/message_text.txt | 4 +
.../obj/m20251004-023247_1_2_0/message.json | 19 +
.../m20251004-023247_1_2_0/message_text.txt | 3 +
.../obj/m20251004-023251_1_0_0/message.json | 19 +
.../m20251004-023251_1_0_0/message_text.txt | 4 +
.../obj/m20251004-023251_1_2_0/message.json | 19 +
.../m20251004-023251_1_2_0/message_text.txt | 4 +
.../obj/m20251004-023251_1_2_1/message.json | 19 +
.../m20251004-023251_1_2_1/message_text.txt | 6 +
.../obj/m20251004-023636_1_0_0/message.json | 19 +
.../m20251004-023636_1_0_0/message_text.txt | 1 +
.../obj/m20251004-023641_1_1_0/message.json | 19 +
.../m20251004-023641_1_1_0/message_text.txt | 8 +
.../obj/m20251004-023642_1_1_0/message.json | 19 +
.../m20251004-023642_1_1_0/message_text.txt | 3 +
.../obj/m20251004-023645_1_1_0/message.json | 19 +
.../m20251004-023645_1_1_0/message_text.txt | 4 +
.../obj/m20251004-023645_1_1_1/message.json | 19 +
.../m20251004-023645_1_1_1/message_text.txt | 6 +
.../obj/m20251004-023645_1_2_0/message.json | 19 +
.../m20251004-023645_1_2_0/message_text.txt | 3 +
.../obj/m20251004-023648_1_0_0/message.json | 19 +
.../m20251004-023648_1_0_0/message_text.txt | 4 +
.../obj/m20251004-023648_1_2_0/message.json | 19 +
.../m20251004-023648_1_2_0/message_text.txt | 4 +
.../obj/m20251004-023648_1_2_1/message.json | 19 +
.../m20251004-023648_1_2_1/message_text.txt | 6 +
.../obj/m20251004-023929_1_0_0/message.json | 19 +
.../m20251004-023929_1_0_0/message_text.txt | 1 +
.../obj/m20251004-023935_1_1_0/message.json | 19 +
.../m20251004-023935_1_1_0/message_text.txt | 3 +
.../obj/m20251004-023939_1_1_0/message.json | 19 +
.../m20251004-023939_1_1_0/message_text.txt | 4 +
.../obj/m20251004-023939_1_1_1/message.json | 19 +
.../m20251004-023939_1_1_1/message_text.txt | 6 +
.../obj/m20251004-023939_1_2_0/message.json | 19 +
.../m20251004-023939_1_2_0/message_text.txt | 3 +
.../obj/m20251004-023942_1_0_0/message.json | 19 +
.../m20251004-023942_1_0_0/message_text.txt | 4 +
.../obj/m20251004-023942_1_2_0/message.json | 19 +
.../m20251004-023942_1_2_0/message_text.txt | 4 +
.../obj/m20251004-023942_1_2_1/message.json | 19 +
.../m20251004-023942_1_2_1/message_text.txt | 6 +
.../obj/m20251004-024459_1_0_0/message.json | 19 +
.../m20251004-024459_1_0_0/message_text.txt | 1 +
.../obj/m20251004-024504_1_1_0/message.json | 19 +
.../m20251004-024504_1_1_0/message_text.txt | 3 +
.../obj/m20251004-024540_1_1_0/message.json | 19 +
.../m20251004-024540_1_1_0/message_text.txt | 4 +
.../obj/m20251004-024540_1_1_1/message.json | 19 +
.../m20251004-024540_1_1_1/message_text.txt | 4 +
.../document_001_metadata.json | 12 +
.../obj/m20251004-024540_1_2_0/message.json | 19 +
.../m20251004-024540_1_2_0/message_text.txt | 3 +
.../obj/m20251004-024543_1_2_1/message.json | 19 +
.../m20251004-024543_1_2_1/message_text.txt | 6 +
.../obj/m20251004-024544_1_0_0/message.json | 19 +
.../m20251004-024544_1_0_0/message_text.txt | 4 +
.../obj/m20251004-024544_1_2_0/message.json | 19 +
.../m20251004-024544_1_2_0/message_text.txt | 4 +
.../obj/m20251004-024704_1_0_0/message.json | 19 +
.../m20251004-024704_1_0_0/message_text.txt | 1 +
.../obj/m20251004-024709_1_1_0/message.json | 19 +
.../m20251004-024709_1_1_0/message_text.txt | 3 +
.../obj/m20251004-024746_1_1_1/message.json | 19 +
.../m20251004-024746_1_1_1/message_text.txt | 4 +
.../document_001_metadata.json | 12 +
.../obj/m20251004-024749_1_1_0/message.json | 19 +
.../m20251004-024749_1_1_0/message_text.txt | 4 +
.../obj/m20251004-024750_1_2_0/message.json | 19 +
.../m20251004-024750_1_2_0/message_text.txt | 3 +
.../obj/m20251004-024754_1_2_1/message.json | 19 +
.../m20251004-024754_1_2_1/message_text.txt | 6 +
.../obj/m20251004-024759_1_2_2/message.json | 19 +
.../m20251004-024759_1_2_2/message_text.txt | 6 +
.../obj/m20251004-024803_1_2_3/message.json | 19 +
.../m20251004-024803_1_2_3/message_text.txt | 6 +
.../obj/m20251004-024807_1_2_4/message.json | 19 +
.../m20251004-024807_1_2_4/message_text.txt | 6 +
.../obj/m20251004-024812_1_2_5/message.json | 19 +
.../m20251004-024812_1_2_5/message_text.txt | 6 +
.../obj/m20251004-024813_1_0_0/message.json | 19 +
.../m20251004-024813_1_0_0/message_text.txt | 4 +
.../obj/m20251004-024813_1_2_0/message.json | 19 +
.../m20251004-024813_1_2_0/message_text.txt | 4 +
271 files changed, 7285 insertions(+), 3729 deletions(-)
create mode 100644 modules/workflows/processing/adaptive/__init__.py
create mode 100644 modules/workflows/processing/adaptive/contentValidator.py
create mode 100644 modules/workflows/processing/adaptive/intentAnalyzer.py
create mode 100644 modules/workflows/processing/adaptive/learningEngine.py
create mode 100644 modules/workflows/processing/adaptive/progressTracker.py
create mode 100644 modules/workflows/processing/core/__init__.py
create mode 100644 modules/workflows/processing/core/actionExecutor.py
create mode 100644 modules/workflows/processing/core/messageCreator.py
create mode 100644 modules/workflows/processing/core/taskPlanner.py
create mode 100644 modules/workflows/processing/core/validator.py
delete mode 100644 modules/workflows/processing/handlingTasks.py
create mode 100644 modules/workflows/processing/modes/__init__.py
create mode 100644 modules/workflows/processing/modes/actionplanMode.py
create mode 100644 modules/workflows/processing/modes/baseMode.py
create mode 100644 modules/workflows/processing/modes/reactMode.py
delete mode 100644 modules/workflows/processing/promptFactory.py
create mode 100644 modules/workflows/processing/shared/__init__.py
rename modules/workflows/processing/{ => shared}/executionState.py (100%)
create mode 100644 modules/workflows/processing/shared/promptFactory.py
rename modules/workflows/processing/{ => shared}/promptFactoryPlaceholders.py (62%)
create mode 100644 modules/workflows/processing/workflowProcessor.py
delete mode 100644 test-chat/extraction/method_ai_20251003-200852/ai_result_r0t0a0.txt
delete mode 100644 test-chat/extraction/method_ai_20251003-200852/raw_result.txt
delete mode 100644 test-chat/extraction/method_ai_20251003-200904/ai_result_r0t0a0.txt
delete mode 100644 test-chat/extraction/method_ai_20251003-200904/raw_result.txt
create mode 100644 test-chat/extraction/method_ai_20251004-000320/ai_result_r0t0a0.txt
create mode 100644 test-chat/extraction/method_ai_20251004-000320/raw_result.txt
rename test-chat/extraction/{method_ai_20251003-200841 => method_ai_20251004-000320}/summary.txt (100%)
create mode 100644 test-chat/extraction/method_ai_20251004-000335/ai_result_r0t0a0.txt
create mode 100644 test-chat/extraction/method_ai_20251004-000335/raw_result.txt
rename test-chat/extraction/{method_ai_20251003-200852 => method_ai_20251004-000335}/summary.txt (100%)
create mode 100644 test-chat/extraction/method_ai_20251004-001456/ai_result_r0t0a0.txt
create mode 100644 test-chat/extraction/method_ai_20251004-001456/raw_result.txt
rename test-chat/extraction/{method_ai_20251003-200904 => method_ai_20251004-001456}/summary.txt (100%)
create mode 100644 test-chat/extraction/method_ai_20251004-001507/ai_result_r0t0a0.txt
create mode 100644 test-chat/extraction/method_ai_20251004-001507/raw_result.txt
create mode 100644 test-chat/extraction/method_ai_20251004-001507/summary.txt
create mode 100644 test-chat/extraction/method_ai_20251004-001800/ai_result_r0t0a0.txt
create mode 100644 test-chat/extraction/method_ai_20251004-001800/raw_result.txt
create mode 100644 test-chat/extraction/method_ai_20251004-001800/summary.txt
create mode 100644 test-chat/extraction/method_ai_20251004-001810/ai_result_r0t0a0.txt
create mode 100644 test-chat/extraction/method_ai_20251004-001810/raw_result.txt
create mode 100644 test-chat/extraction/method_ai_20251004-001810/summary.txt
rename test-chat/extraction/{method_ai_20251003-200841 => method_ai_20251004-004539}/ai_result_r0t0a0.txt (73%)
rename test-chat/extraction/{method_ai_20251003-200841 => method_ai_20251004-004539}/raw_result.txt (73%)
create mode 100644 test-chat/extraction/method_ai_20251004-004539/summary.txt
create mode 100644 test-chat/extraction/method_ai_20251004-004746/ai_result_r0t0a0.txt
create mode 100644 test-chat/extraction/method_ai_20251004-004746/raw_result.txt
create mode 100644 test-chat/extraction/method_ai_20251004-004746/summary.txt
delete mode 100644 test-chat/obj/m20251003-220757_1_1_0/message_text.txt
delete mode 100644 test-chat/obj/m20251003-220841_1_1_1/message.json
delete mode 100644 test-chat/obj/m20251003-220841_1_1_1/message_text.txt
delete mode 100644 test-chat/obj/m20251003-220841_1_1_1/round1_task1_action1_results/document_001_metadata.json
delete mode 100644 test-chat/obj/m20251003-220843_1_1_1/message.json
delete mode 100644 test-chat/obj/m20251003-220843_1_1_1/message_text.txt
delete mode 100644 test-chat/obj/m20251003-220843_1_2_0/message_text.txt
delete mode 100644 test-chat/obj/m20251003-220853_1_2_1/round1_task2_action1_results/document_001_metadata.json
delete mode 100644 test-chat/obj/m20251003-220854_1_2_1/message.json
delete mode 100644 test-chat/obj/m20251003-220854_1_2_1/message_text.txt
delete mode 100644 test-chat/obj/m20251003-220904_1_2_2/message.json
delete mode 100644 test-chat/obj/m20251003-220904_1_2_2/round1_task2_action2_results/document_001_metadata.json
delete mode 100644 test-chat/obj/m20251003-220906_1_2_2/message.json
delete mode 100644 test-chat/obj/m20251003-220906_1_2_2/message_text.txt
delete mode 100644 test-chat/obj/m20251003-220907_1_0_0/message_text.txt
rename test-chat/obj/{m20251003-220751_1_0_0 => m20251004-015321_1_0_0}/message.json (72%)
rename test-chat/obj/{m20251003-220751_1_0_0 => m20251004-015321_1_0_0}/message_text.txt (100%)
rename test-chat/obj/{m20251003-220757_1_1_0 => m20251004-015326_1_1_0}/message.json (64%)
create mode 100644 test-chat/obj/m20251004-015326_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-015331_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-015331_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-015335_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-015335_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-015335_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-015335_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-015339_1_2_1/message.json
create mode 100644 test-chat/obj/m20251004-015339_1_2_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-015342_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-015342_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-015342_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-015342_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-020301_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-020301_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-020306_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-020306_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-020311_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-020311_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-020321_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-020321_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-020321_1_1_1/round1_task1_action1_prime_generation_steps/document_001_metadata.json
create mode 100644 test-chat/obj/m20251004-020324_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-020324_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-020324_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-020324_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-020329_1_2_1/message.json
create mode 100644 test-chat/obj/m20251004-020329_1_2_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-020335_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-020335_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-020335_1_1_1/round1_task1_action1_generate_prime_list/document_001_metadata.json
create mode 100644 test-chat/obj/m20251004-020335_1_2_2/message.json
create mode 100644 test-chat/obj/m20251004-020335_1_2_2/message_text.txt
create mode 100644 test-chat/obj/m20251004-020336_1_1_2/message.json
create mode 100644 test-chat/obj/m20251004-020336_1_1_2/message_text.txt
create mode 100644 test-chat/obj/m20251004-020338_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-020338_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-020338_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-020338_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021441_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-021441_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021446_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-021446_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021447_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-021447_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021451_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-021451_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-021457_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-021457_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-021457_1_1_1/round1_task1_action1_generate_prime_sieve_function/document_001_metadata.json
create mode 100644 test-chat/obj/m20251004-021500_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-021500_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021500_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-021500_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021504_1_2_1/message.json
create mode 100644 test-chat/obj/m20251004-021504_1_2_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-021507_1_1_1/message.json
rename test-chat/obj/{m20251003-220853_1_2_1 => m20251004-021507_1_1_1}/message_text.txt (100%)
create mode 100644 test-chat/obj/m20251004-021507_1_1_1/round1_task1_action1_generate_prime_numbers/document_001_metadata.json
rename test-chat/obj/{m20251003-220907_1_0_0 => m20251004-021511_1_0_0}/message.json (64%)
create mode 100644 test-chat/obj/m20251004-021511_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021511_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-021511_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021746_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-021746_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021751_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-021751_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021801_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-021801_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021801_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-021801_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-021801_1_1_1/round1_task1_action1_results/document_001_metadata.json
create mode 100644 test-chat/obj/m20251004-021801_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-021801_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021811_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-021811_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-021811_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-021811_1_2_0/message_text.txt
rename test-chat/obj/{m20251003-220853_1_2_1 => m20251004-021811_1_2_1}/message.json (65%)
rename test-chat/obj/{m20251003-220904_1_2_2 => m20251004-021811_1_2_1}/message_text.txt (67%)
create mode 100644 test-chat/obj/m20251004-021811_1_2_1/round1_task2_action1_results/document_001_metadata.json
create mode 100644 test-chat/obj/m20251004-022428_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-022428_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-022433_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-022433_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-022437_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-022437_1_1_0/message_text.txt
rename test-chat/obj/{m20251003-220843_1_2_0 => m20251004-022437_1_2_0}/message.json (61%)
create mode 100644 test-chat/obj/m20251004-022437_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-022440_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-022440_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-022440_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-022440_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023003_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-023003_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023008_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-023008_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023011_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-023011_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023011_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-023011_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-023012_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-023012_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023015_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-023015_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023015_1_2_1/message.json
create mode 100644 test-chat/obj/m20251004-023015_1_2_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-023016_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-023016_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023238_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-023238_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023243_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-023243_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023246_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-023246_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-023247_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-023247_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023247_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-023247_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023251_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-023251_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023251_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-023251_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023251_1_2_1/message.json
create mode 100644 test-chat/obj/m20251004-023251_1_2_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-023636_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-023636_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023641_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-023641_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023642_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-023642_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023645_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-023645_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023645_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-023645_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-023645_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-023645_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023648_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-023648_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023648_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-023648_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023648_1_2_1/message.json
create mode 100644 test-chat/obj/m20251004-023648_1_2_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-023929_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-023929_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023935_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-023935_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023939_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-023939_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023939_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-023939_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-023939_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-023939_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023942_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-023942_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023942_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-023942_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-023942_1_2_1/message.json
create mode 100644 test-chat/obj/m20251004-023942_1_2_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-024459_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-024459_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-024504_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-024504_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-024540_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-024540_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-024540_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-024540_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-024540_1_1_1/round1_task1_action1_results/document_001_metadata.json
create mode 100644 test-chat/obj/m20251004-024540_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-024540_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-024543_1_2_1/message.json
create mode 100644 test-chat/obj/m20251004-024543_1_2_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-024544_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-024544_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-024544_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-024544_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-024704_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-024704_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-024709_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-024709_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-024746_1_1_1/message.json
create mode 100644 test-chat/obj/m20251004-024746_1_1_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-024746_1_1_1/round1_task1_action1_results/document_001_metadata.json
create mode 100644 test-chat/obj/m20251004-024749_1_1_0/message.json
create mode 100644 test-chat/obj/m20251004-024749_1_1_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-024750_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-024750_1_2_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-024754_1_2_1/message.json
create mode 100644 test-chat/obj/m20251004-024754_1_2_1/message_text.txt
create mode 100644 test-chat/obj/m20251004-024759_1_2_2/message.json
create mode 100644 test-chat/obj/m20251004-024759_1_2_2/message_text.txt
create mode 100644 test-chat/obj/m20251004-024803_1_2_3/message.json
create mode 100644 test-chat/obj/m20251004-024803_1_2_3/message_text.txt
create mode 100644 test-chat/obj/m20251004-024807_1_2_4/message.json
create mode 100644 test-chat/obj/m20251004-024807_1_2_4/message_text.txt
create mode 100644 test-chat/obj/m20251004-024812_1_2_5/message.json
create mode 100644 test-chat/obj/m20251004-024812_1_2_5/message_text.txt
create mode 100644 test-chat/obj/m20251004-024813_1_0_0/message.json
create mode 100644 test-chat/obj/m20251004-024813_1_0_0/message_text.txt
create mode 100644 test-chat/obj/m20251004-024813_1_2_0/message.json
create mode 100644 test-chat/obj/m20251004-024813_1_2_0/message_text.txt
diff --git a/modules/services/serviceAi/mainServiceAi.py b/modules/services/serviceAi/mainServiceAi.py
index 72049a60..59ad66ce 100644
--- a/modules/services/serviceAi/mainServiceAi.py
+++ b/modules/services/serviceAi/mainServiceAi.py
@@ -742,36 +742,21 @@ class AiService:
# Ensure aiObjects is initialized
await self._ensureAiObjectsInitialized()
- # Get available models for planning (text + reasoning capabilities)
- models = self._getModelsForOperation("planning", options)
+ # Build full prompt with placeholders
+ full_prompt = self._buildPromptWithPlaceholders(prompt, placeholders)
- for model in models:
- try:
- # Build full prompt with placeholders
- full_prompt = self._buildPromptWithPlaceholders(prompt, placeholders)
-
- # Check size and reduce if needed
- if self._exceedsTokenLimit(full_prompt, model, options.safetyMargin):
- full_prompt = self._reducePlanningPrompt(full_prompt, placeholders, model, options)
-
- # Make AI call using AiObjects
- request = AiCallRequest(
- prompt=full_prompt,
- context="", # Context is already included in the prompt
- options=options
- )
- response = await self.aiObjects.call(request)
- try:
- logger.debug(f"AI model selected (planning): {getattr(response, 'modelName', 'unknown')}")
- except Exception:
- pass
- return response.content
-
- except Exception as e:
- logger.warning(f"Planning model {model.name} failed: {e}")
- continue
-
- raise Exception("All planning models failed - check model availability and capabilities")
+ # Make AI call using AiObjects (let it handle model selection)
+ request = AiCallRequest(
+ prompt=full_prompt,
+ context="", # Context is already included in the prompt
+ options=options
+ )
+ response = await self.aiObjects.call(request)
+ try:
+ logger.debug(f"AI model selected (planning): {getattr(response, 'modelName', 'unknown')}")
+ except Exception:
+ pass
+ return response.content
async def _callAiText(
self,
@@ -970,16 +955,21 @@ class AiService:
"""
Get models capable of handling the specific operation with capability filtering.
"""
- # For now, return a default model - this will be enhanced with actual model registry
- default_model = ModelCapabilities(
- name="default",
- maxTokens=4000,
- capabilities=["text", "reasoning"] if operation_type == "planning" else ["text"],
- costPerToken=0.001,
- processingTime=1.0,
- isAvailable=True
- )
- return [default_model]
+ # Use the actual AI objects model selection instead of hardcoded default
+ if hasattr(self, 'aiObjects') and self.aiObjects:
+ # Let AiObjects handle the model selection
+ return []
+ else:
+ # Fallback to default model if AiObjects not available
+ default_model = ModelCapabilities(
+ name="default",
+ maxTokens=4000,
+ capabilities=["text", "reasoning"] if operation_type == "planning" else ["text"],
+ costPerToken=0.001,
+ processingTime=1.0,
+ isAvailable=True
+ )
+ return [default_model]
def _buildPromptWithPlaceholders(self, prompt: str, placeholders: Optional[Dict[str, str]]) -> str:
"""
diff --git a/modules/workflows/methods/methodAi.py b/modules/workflows/methods/methodAi.py
index 25a3498e..2bb667e3 100644
--- a/modules/workflows/methods/methodAi.py
+++ b/modules/workflows/methods/methodAi.py
@@ -30,18 +30,18 @@ class MethodAi(MethodBase):
@action
async def process(self, parameters: Dict[str, Any]) -> ActionResult:
"""
- AI text processing and analysis - returns plain text only, NO document generation
+ AI data delivery and analysis - returns plain text only, NO document generation
- USE FOR: Text analysis, data processing, content generation, research, Q&A, brainstorming, summarization, translation, code generation
- DO NOT USE FOR: Creating formatted documents (Word, PDF, Excel), document generation, file creation
+ USE FOR: Data delivery, analysis, research, Q&A, summarization, translation
+ DO NOT USE FOR: Code generation, creating formatted documents (Word, PDF, Excel), document generation, file creation
- INPUT REQUIREMENTS: Requires aiPrompt parameter (the question or task for AI)
+ INPUT REQUIREMENTS: Requires aiPrompt parameter (what to deliver)
OUTPUT FORMAT: Plain text only (.txt, .json, .md, .csv, .xml) - NO binary files
DEPENDENCIES: None - can work standalone
- WORKFLOW POSITION: Use for analysis, research, or text processing tasks
+ WORKFLOW POSITION: Use for data delivery, analysis, research, or text processing tasks
Parameters:
- aiPrompt (str): The AI prompt for processing
+ aiPrompt (str): The AI prompt for what we want to have delivered
documentList (list, optional): List of document references to include in context
resultType (str, optional): Output format type - use 'txt', 'json', 'md', 'csv', or 'xml' (defaults to 'txt')
processingMode (str, optional): Processing mode - use 'basic', 'advanced', or 'detailed' (defaults to 'basic')
@@ -53,7 +53,14 @@ class MethodAi(MethodBase):
requiredTags (list, optional): Required model tags - use 'text', 'chat', 'reasoning', 'analysis', 'image', 'vision', 'web', 'search', etc.
"""
try:
+ # Debug logging to see what parameters are received
+ logger.info(f"MethodAi.process received parameters: {parameters}")
+ logger.info(f"Parameters type: {type(parameters)}")
+ logger.info(f"Parameters keys: {list(parameters.keys()) if isinstance(parameters, dict) else 'Not a dict'}")
+
aiPrompt = parameters.get("aiPrompt")
+ logger.info(f"aiPrompt extracted: '{aiPrompt}' (type: {type(aiPrompt)})")
+
documentList = parameters.get("documentList", [])
if isinstance(documentList, str):
documentList = [documentList]
@@ -67,6 +74,7 @@ class MethodAi(MethodBase):
requiredTags = parameters.get("requiredTags")
if not aiPrompt:
+ logger.error(f"aiPrompt is missing or empty. Parameters: {parameters}")
return ActionResult.isFailure(
error="AI prompt is required"
)
@@ -117,7 +125,7 @@ class MethodAi(MethodBase):
if chatDocuments:
logger.info(f"Including {len(chatDocuments)} documents for AI processing")
- # Add format-specific instruction for structured response
+ # Add format-specific instruction for structured response with continuation support
if resultType == "json":
format_instruction = """
@@ -129,10 +137,12 @@ Please return your response in the following JSON format:
"mimeType": "application/json",
"comment": "optional comment about content"
}}
- ]
+ ],
+ "continue": false
}}
The data field should contain valid JSON content.
+For large datasets, set "continue": true to indicate more data is coming, and we'll ask for the next chunk.
"""
else:
format_instruction = f"""
@@ -145,10 +155,12 @@ Please return your response in the following JSON format:
"mimeType": "{output_mime_type}",
"comment": "optional comment about content"
}}
- ]
+ ],
+ "continue": false
}}
The data field should contain the content in {resultType.upper()} format.
+For large datasets, set "continue": true to indicate more data is coming, and we'll ask for the next chunk.
"""
call_prompt = enhanced_prompt + format_instruction
@@ -189,59 +201,123 @@ The data field should contain the content in {resultType.upper()} format.
except Exception:
pass
- # Parse JSON response from AI and create proper ActionDocument objects
+ # Parse JSON response from AI with streaming support
import json
import re
- from modules.datamodels.datamodelWorkflow import ActionDocument, ActionResult
+ from modules.datamodels.datamodelWorkflow import ActionDocument
action_documents = []
+ all_data_chunks = [] # Store all data chunks for merging
+
try:
- # Clean up the response (remove markdown code blocks if present)
- cleaned_result = (result or "").strip()
- # Remove code fences anywhere in the text
- cleaned_result = re.sub(r"```json|```", "", cleaned_result).strip()
+ # Process streaming response
+ chunk_number = 0
+ continue_processing = True
+ current_result = result
+
+ while continue_processing:
+ chunk_number += 1
+ logger.info(f"Processing AI response chunk {chunk_number}")
+
+ # Clean up the response (remove markdown code blocks if present)
+ cleaned_result = (current_result or "").strip()
+ # Remove code fences anywhere in the text
+ cleaned_result = re.sub(r"```json|```", "", cleaned_result).strip()
- # Try direct parse first
- try:
- parsed_response = json.loads(cleaned_result)
- except Exception:
- # Heuristic extraction: find the largest {...} block
- start = cleaned_result.find("{")
- end = cleaned_result.rfind("}")
- if start != -1 and end != -1 and end > start:
- candidate = cleaned_result[start:end+1]
- # Remove trailing commas before closing braces/brackets
- candidate = re.sub(r",\s*([}\]])", r"\1", candidate)
- parsed_response = json.loads(candidate)
- else:
- # Try extracting a JSON code block via regex as last resort
- match = re.search(r"\{[\s\S]*\}", cleaned_result)
- if match:
- candidate = re.sub(r",\s*([}\]])", r"\1", match.group(0))
+ # Try direct parse first
+ try:
+ parsed_response = json.loads(cleaned_result)
+ except Exception:
+ # Heuristic extraction: find the largest {...} block
+ start = cleaned_result.find("{")
+ end = cleaned_result.rfind("}")
+ if start != -1 and end != -1 and end > start:
+ candidate = cleaned_result[start:end+1]
+ # Remove trailing commas before closing braces/brackets
+ candidate = re.sub(r",\s*([}\]])", r"\1", candidate)
parsed_response = json.loads(candidate)
else:
- raise
+ # Try extracting a JSON code block via regex as last resort
+ match = re.search(r"\{[\s\S]*\}", cleaned_result)
+ if match:
+ candidate = re.sub(r",\s*([}\]])", r"\1", match.group(0))
+ parsed_response = json.loads(candidate)
+ else:
+ raise
+
+ # Check if we should continue
+ continue_processing = parsed_response.get("continue", False)
+
+ # Extract documents from response
+ if isinstance(parsed_response, dict) and "documents" in parsed_response:
+ for doc in parsed_response["documents"]:
+ if isinstance(doc, dict):
+ all_data_chunks.append(doc.get("data", ""))
+
+ # If we need to continue, ask for the next chunk
+ if continue_processing:
+ logger.info(f"AI indicated more data coming, requesting chunk {chunk_number + 1}")
+
+ # Build context from previous chunks
+ previous_data_summary = ""
+ if all_data_chunks:
+ # Show a summary of what was already provided
+ total_chars = sum(len(str(chunk)) for chunk in all_data_chunks)
+ previous_data_summary = f"""
+CONTEXT: You have already provided {len(all_data_chunks)} chunks of data ({total_chars} characters total).
+The last chunk contained: {str(all_data_chunks[-1])[:200]}{'...' if len(str(all_data_chunks[-1])) > 200 else ''}
+
+Please continue with the next chunk, ensuring no duplication of previous data.
+"""
+
+ continuation_prompt = f"""
+{previous_data_summary}
+
+Please continue with the next chunk of data. Return the same JSON format:
+{{
+ "documents": [
+ {{
+ "data": "next chunk of data here",
+ "mimeType": "{output_mime_type}",
+ "comment": "chunk {chunk_number + 1}"
+ }}
+ ],
+ "continue": false
+}}
+
+Set "continue": false when this is the final chunk.
+"""
+
+ # Make another AI call for the next chunk
+ current_result = await self.services.ai.callAi(
+ prompt=continuation_prompt,
+ options=options
+ )
+
+ if not current_result:
+ logger.warning("No response for continuation chunk, stopping")
+ break
- # Extract documents from response
- if isinstance(parsed_response, dict) and "documents" in parsed_response:
- for doc in parsed_response["documents"]:
- if isinstance(doc, dict):
- # Generate meaningful file name with workflow context
- extension = output_extension.lstrip('.') # Remove leading dot
- meaningful_name = self._generateMeaningfulFileName(
- base_name="ai",
- extension=extension,
- action_name="result"
- )
- action_documents.append(ActionDocument(
- documentName=meaningful_name,
- documentData=doc.get("data", ""),
- mimeType=doc.get("mimeType", output_mime_type)
- ))
-
- # If no documents found in JSON, create a single document from the raw result
- if not action_documents:
- extension = output_extension.lstrip('.') # Remove leading dot
+ # Merge all data chunks into final documents using intelligent merging
+ if all_data_chunks:
+ merged_data = self._mergeDataChunks(all_data_chunks, resultType, output_mime_type)
+
+ # Create final merged document
+ extension = output_extension.lstrip('.')
+ meaningful_name = self._generateMeaningfulFileName(
+ base_name="ai",
+ extension=extension,
+ action_name="result"
+ )
+
+ action_documents.append(ActionDocument(
+ documentName=meaningful_name,
+ documentData=merged_data,
+ mimeType=output_mime_type
+ ))
+ else:
+ # Fallback: create single document from raw result
+ extension = output_extension.lstrip('.')
meaningful_name = self._generateMeaningfulFileName(
base_name="ai",
extension=extension,
@@ -403,3 +479,101 @@ The data field should contain the content in {resultType.upper()} format.
return ActionResult.isFailure(
error=str(e)
)
+
+ def _mergeDataChunks(self, chunks: List[str], resultType: str, mimeType: str) -> str:
+ """Intelligently merge data chunks using strategies based on content type"""
+ try:
+ if resultType == "json":
+ return self._mergeJsonChunks(chunks)
+ elif resultType in ["csv", "table"]:
+ return self._mergeTableChunks(chunks)
+ elif resultType in ["txt", "md", "text"]:
+ return self._mergeTextChunks(chunks)
+ else:
+ # Default: simple concatenation
+ return "\n".join(str(chunk) for chunk in chunks)
+ except Exception as e:
+ logger.warning(f"Failed to merge chunks intelligently: {str(e)}, using simple concatenation")
+ return "\n".join(str(chunk) for chunk in chunks)
+
+ def _mergeJsonChunks(self, chunks: List[str]) -> str:
+ """Merge JSON chunks intelligently"""
+ import json
+
+ merged_data = []
+ for i, chunk in enumerate(chunks):
+ try:
+ if isinstance(chunk, str):
+ chunk_data = json.loads(chunk)
+ else:
+ chunk_data = chunk
+
+ if isinstance(chunk_data, list):
+ merged_data.extend(chunk_data)
+ elif isinstance(chunk_data, dict):
+ # For objects, merge by combining keys
+ if not merged_data:
+ merged_data = chunk_data
+ else:
+ if isinstance(merged_data, dict):
+ merged_data.update(chunk_data)
+ else:
+ merged_data.append(chunk_data)
+ else:
+ merged_data.append(chunk_data)
+ except Exception as e:
+ logger.warning(f"Failed to parse chunk {i}: {str(e)}")
+ # Add as string if JSON parsing fails
+ merged_data.append(str(chunk))
+
+ return json.dumps(merged_data, indent=2)
+
+ def _mergeTableChunks(self, chunks: List[str]) -> str:
+ """Merge table chunks (CSV) intelligently"""
+ import csv
+ import io
+
+ merged_rows = []
+ headers = None
+
+ for i, chunk in enumerate(chunks):
+ try:
+ # Parse CSV chunk
+ reader = csv.reader(io.StringIO(str(chunk)))
+ rows = list(reader)
+
+ if not rows:
+ continue
+
+ # First chunk: capture headers
+ if i == 0:
+ headers = rows[0] if rows else []
+ merged_rows.extend(rows)
+ else:
+ # Subsequent chunks: skip header if it matches
+ if rows and rows[0] == headers:
+ merged_rows.extend(rows[1:]) # Skip duplicate header
+ else:
+ merged_rows.extend(rows)
+
+ except Exception as e:
+ logger.warning(f"Failed to parse table chunk {i}: {str(e)}")
+ # Add as raw text if CSV parsing fails
+ merged_rows.append([f"Raw chunk {i}: {str(chunk)[:100]}..."])
+
+ # Convert back to CSV
+ output = io.StringIO()
+ writer = csv.writer(output)
+ writer.writerows(merged_rows)
+ return output.getvalue()
+
+ def _mergeTextChunks(self, chunks: List[str]) -> str:
+ """Merge text chunks intelligently"""
+ # Simple concatenation with proper spacing
+ merged = []
+ for chunk in chunks:
+ chunk_str = str(chunk).strip()
+ if chunk_str:
+ merged.append(chunk_str)
+
+ return "\n\n".join(merged) # Double newline between chunks for readability
diff --git a/modules/workflows/processing/adaptive/__init__.py b/modules/workflows/processing/adaptive/__init__.py
new file mode 100644
index 00000000..fdff3698
--- /dev/null
+++ b/modules/workflows/processing/adaptive/__init__.py
@@ -0,0 +1,9 @@
+# adaptive module for React mode
+# Provides adaptive learning capabilities
+
+from .intentAnalyzer import IntentAnalyzer, DataType, ExpectedFormat
+from .contentValidator import ContentValidator
+from .learningEngine import LearningEngine
+from .progressTracker import ProgressTracker
+
+__all__ = ['IntentAnalyzer', 'ContentValidator', 'LearningEngine', 'ProgressTracker', 'DataType', 'ExpectedFormat']
diff --git a/modules/workflows/processing/adaptive/contentValidator.py b/modules/workflows/processing/adaptive/contentValidator.py
new file mode 100644
index 00000000..48339cb4
--- /dev/null
+++ b/modules/workflows/processing/adaptive/contentValidator.py
@@ -0,0 +1,308 @@
+# contentValidator.py
+# Content validation for adaptive React mode
+
+import re
+import logging
+from typing import List, Dict, Any
+
+logger = logging.getLogger(__name__)
+
+class ContentValidator:
+ """Validates delivered content against user intent"""
+
+ def __init__(self):
+ pass
+
+    def validateContent(self, documents: List[Any], intent: Dict[str, Any]) -> Dict[str, Any]:
+        """Validates delivered content against user intent"""
+        try:
+            validationDetails = []
+
+            for doc in documents:
+                content = self._extractContent(doc)
+                detail = self._validateSingleDocument(content, doc, intent)
+                validationDetails.append(detail)
+
+            # Overall success: every boolean criterion of every document must be True (flatten the per-doc lists)
+            overallSuccess = all(all(detail.get("successCriteriaMet", [False])) for detail in validationDetails)
+
+            # Calculate quality score
+            qualityScore = self._calculateQualityScore(validationDetails)
+
+            # Generate improvement suggestions
+            improvementSuggestions = self._generateImprovementSuggestions(validationDetails, intent)
+
+            return {
+                "overallSuccess": overallSuccess,
+                "qualityScore": qualityScore,
+                "validationDetails": validationDetails,
+                "improvementSuggestions": improvementSuggestions
+            }
+
+        except Exception as e:
+            logger.error(f"Error validating content: {str(e)}")
+            return self._createFailedValidationResult(str(e))
+
+ def _extractContent(self, doc: Any) -> str:
+ """Extracts content from a document"""
+ try:
+ if hasattr(doc, 'documentData'):
+ data = doc.documentData
+ if isinstance(data, dict) and 'content' in data:
+ return str(data['content'])
+ else:
+ return str(data)
+ return ""
+ except Exception:
+ return ""
+
+ def _validateSingleDocument(self, content: str, doc: Any, intent: Dict[str, Any]) -> Dict[str, Any]:
+ """Validates a single document against intent"""
+ # Check data type match
+ dataTypeMatch = self._checkDataTypeMatch(content, intent.get("dataType", "unknown"))
+
+ # Check format match
+ formatMatch = self._checkFormatMatch(content, intent.get("expectedFormat", "unknown"))
+
+ # Calculate quality score
+ qualityScore = self._calculateDocumentQualityScore(content, intent)
+
+ # Check success criteria
+ successCriteriaMet = self._checkSuccessCriteria(content, intent)
+
+ # Identify specific issues
+ specificIssues = self._identifySpecificIssues(content, intent)
+
+ # Generate improvement suggestions
+ improvementSuggestions = self._generateDocumentImprovementSuggestions(content, intent)
+
+ return {
+ "documentName": getattr(doc, 'documentName', 'Unknown'),
+ "dataTypeMatch": dataTypeMatch,
+ "formatMatch": formatMatch,
+ "qualityScore": qualityScore,
+ "successCriteriaMet": successCriteriaMet,
+ "specificIssues": specificIssues,
+ "improvementSuggestions": improvementSuggestions
+ }
+
+ def _checkDataTypeMatch(self, content: str, dataType: str) -> bool:
+ """Checks if content matches the expected data type"""
+ if dataType == "numbers":
+ return self._containsNumbers(content)
+ elif dataType == "text":
+ return self._containsText(content)
+ elif dataType == "documents":
+ return self._containsDocumentContent(content)
+ elif dataType == "analysis":
+ return self._containsAnalysis(content)
+ elif dataType == "code":
+ return self._containsCode(content)
+ else:
+ return True # Unknown type, assume match
+
+ def _containsNumbers(self, content: str) -> bool:
+ """Checks if content contains actual numbers (not code)"""
+ # Look for actual numbers in the content
+ numbers = re.findall(r'\b\d+\b', content)
+
+ # Check if it's code (contains function definitions, etc.)
+ isCode = any(keyword in content.lower() for keyword in [
+ 'def ', 'function', 'import ', 'class ', 'for ', 'while ', 'if ',
+ 'return', 'print(', 'console.log', 'public ', 'private '
+ ])
+
+ # If it's code, it doesn't contain actual numbers
+ if isCode:
+ return False
+
+ # If it has numbers and it's not code, it contains actual numbers
+ return len(numbers) > 0
+
+ def _containsText(self, content: str) -> bool:
+ """Checks if content contains readable text"""
+ # Remove numbers and special characters
+ textContent = re.sub(r'[^\w\s]', '', content)
+ words = textContent.split()
+
+ # Check if there are enough words to be considered text
+ return len(words) > 5
+
+ def _containsDocumentContent(self, content: str) -> bool:
+ """Checks if content is suitable for document creation"""
+ # Check for structured content
+ hasStructure = any(indicator in content for indicator in [
+            '\n', '\t', '|', '-', '*', '1.', '2.', '•', '…'
+ ])
+
+ # Check for meaningful content
+ hasMeaningfulContent = len(content.strip()) > 50
+
+ return hasStructure and hasMeaningfulContent
+
+ def _containsAnalysis(self, content: str) -> bool:
+ """Checks if content contains analysis"""
+ analysisIndicators = [
+ 'analysis', 'findings', 'conclusion', 'summary', 'insights',
+ 'trends', 'patterns', 'comparison', 'evaluation', 'assessment'
+ ]
+
+ contentLower = content.lower()
+ return any(indicator in contentLower for indicator in analysisIndicators)
+
+ def _containsCode(self, content: str) -> bool:
+ """Checks if content contains code"""
+ codeIndicators = [
+ 'def ', 'function', 'import ', 'class ', 'for ', 'while ', 'if ',
+ 'return', 'print(', 'console.log', 'public ', 'private ', 'void ',
+ 'int ', 'string ', 'var ', 'let ', 'const '
+ ]
+
+ contentLower = content.lower()
+ return any(indicator in contentLower for indicator in codeIndicators)
+
+ def _checkFormatMatch(self, content: str, expectedFormat: str) -> bool:
+ """Checks if content matches expected format"""
+ if expectedFormat == "raw_data":
+ # Raw data should be simple, not heavily formatted
+ return not any(indicator in content for indicator in [
+                '<b>', '<i>', '<br>', '## ', '### ', '**', '__'
+ ])
+ elif expectedFormat == "formatted":
+ # Formatted content should have structure
+ return any(indicator in content for indicator in [
+                '\n', '\t', '|', '-', '*', '1.', '2.', '•'
+ ])
+ elif expectedFormat == "structured":
+ # Structured content should have clear organization
+ return any(indicator in content for indicator in [
+ '{', '}', '[', ']', '|', '\t', ' '
+ ])
+ else:
+ return True # Unknown format, assume match
+
+ def _checkSuccessCriteria(self, content: str, intent: Dict[str, Any]) -> List[bool]:
+ """Checks if content meets success criteria"""
+ criteriaMet = []
+ successCriteria = intent.get("successCriteria", [])
+
+ for criterion in successCriteria:
+ if 'prime numbers' in criterion.lower():
+ # Check if content contains actual prime numbers, not code
+ hasNumbers = bool(re.search(r'\b\d+\b', content))
+ isNotCode = not any(keyword in content.lower() for keyword in [
+ 'def ', 'function', 'import ', 'class '
+ ])
+ criteriaMet.append(hasNumbers and isNotCode)
+ elif 'document' in criterion.lower():
+ # Check if content is suitable for document creation
+ hasStructure = any(indicator in content for indicator in [
+ '\n', '\t', '|', '-', '*', '1.', '2.'
+ ])
+ criteriaMet.append(hasStructure)
+ elif 'format' in criterion.lower():
+ # Check if content is properly formatted
+ hasFormatting = any(indicator in content for indicator in [
+                    '\n', '\t', '|', '-', '*', '1.', '2.', '•'
+ ])
+ criteriaMet.append(hasFormatting)
+ else:
+ # Generic check - content should not be empty
+ criteriaMet.append(len(content.strip()) > 0)
+
+ return criteriaMet
+
+ def _calculateDocumentQualityScore(self, content: str, intent: Dict[str, Any]) -> float:
+ """Calculates quality score for a single document"""
+ score = 0.0
+
+ # Base score for having content
+ if len(content.strip()) > 0:
+ score += 0.2
+
+ # Score for data type match
+ if self._checkDataTypeMatch(content, intent.get("dataType", "unknown")):
+ score += 0.3
+
+ # Score for format match
+ if self._checkFormatMatch(content, intent.get("expectedFormat", "unknown")):
+ score += 0.2
+
+ # Score for success criteria
+ successCriteriaMet = self._checkSuccessCriteria(content, intent)
+ if successCriteriaMet:
+ successRate = sum(successCriteriaMet) / len(successCriteriaMet)
+ score += 0.3 * successRate
+
+ return min(score, 1.0)
+
+ def _calculateQualityScore(self, validationDetails: List[Dict[str, Any]]) -> float:
+ """Calculates overall quality score from validation details"""
+ if not validationDetails:
+ return 0.0
+
+ totalScore = sum(detail.get("qualityScore", 0) for detail in validationDetails)
+ return totalScore / len(validationDetails)
+
+ def _identifySpecificIssues(self, content: str, intent: Dict[str, Any]) -> List[str]:
+ """Identifies specific issues with the content"""
+ issues = []
+
+ # Check for common issues
+ if intent.get("dataType") == "numbers" and self._containsCode(content):
+ issues.append("Content contains code instead of actual numbers")
+
+        if intent.get("expectedFormat") == "raw_data" and any(indicator in content for indicator in ['<b>', '## ', '**']):
+ issues.append("Content is formatted when raw data was requested")
+
+ if len(content.strip()) == 0:
+ issues.append("Content is empty")
+
+ return issues
+
+ def _generateDocumentImprovementSuggestions(self, content: str, intent: Dict[str, Any]) -> List[str]:
+ """Generates improvement suggestions for a single document"""
+ suggestions = []
+
+ dataType = intent.get("dataType", "unknown")
+ expectedFormat = intent.get("expectedFormat", "unknown")
+
+ if dataType == "numbers" and self._containsCode(content):
+ suggestions.append("Deliver actual numbers, not code to generate them")
+
+        if expectedFormat == "raw_data" and any(indicator in content for indicator in ['<b>', '## ']):
+ suggestions.append("Provide raw data without formatting")
+
+ if len(content.strip()) == 0:
+ suggestions.append("Provide actual content")
+
+ return suggestions
+
+ def _generateImprovementSuggestions(self, validationDetails: List[Dict[str, Any]],
+ intent: Dict[str, Any]) -> List[str]:
+ """Generates improvement suggestions based on validation results"""
+ suggestions = []
+
+ # Check for common issues
+ if not any(detail.get("dataTypeMatch", False) for detail in validationDetails):
+ dataType = intent.get("dataType", "unknown")
+ suggestions.append(f"Content should contain {dataType} data, not code or other formats")
+
+ if not any(detail.get("formatMatch", False) for detail in validationDetails):
+ expectedFormat = intent.get("expectedFormat", "unknown")
+ suggestions.append(f"Content should be in {expectedFormat} format")
+
+ # Add specific suggestions from validation details
+ for detail in validationDetails:
+ suggestions.extend(detail.get("improvementSuggestions", []))
+
+ return list(set(suggestions)) # Remove duplicates
+
+ def _createFailedValidationResult(self, error: str) -> Dict[str, Any]:
+ """Creates a failed validation result"""
+ return {
+ "overallSuccess": False,
+ "qualityScore": 0.0,
+ "validationDetails": [],
+ "improvementSuggestions": [f"Validation failed: {error}"]
+ }
diff --git a/modules/workflows/processing/adaptive/intentAnalyzer.py b/modules/workflows/processing/adaptive/intentAnalyzer.py
new file mode 100644
index 00000000..374742f3
--- /dev/null
+++ b/modules/workflows/processing/adaptive/intentAnalyzer.py
@@ -0,0 +1,239 @@
+# intentAnalyzer.py
+# Intent analysis for adaptive React mode
+
+import re
+import logging
+from typing import Dict, Any, List
+from enum import Enum
+
+logger = logging.getLogger(__name__)
+
+class DataType(Enum):
+ NUMBERS = "numbers"
+ TEXT = "text"
+ DOCUMENTS = "documents"
+ ANALYSIS = "analysis"
+ CODE = "code"
+ UNKNOWN = "unknown"
+
+class ExpectedFormat(Enum):
+ RAW_DATA = "raw_data"
+ FORMATTED = "formatted"
+ STRUCTURED = "structured"
+ VISUAL = "visual"
+ UNKNOWN = "unknown"
+
+class IntentAnalyzer:
+ """Analyzes user intent to understand what they actually want"""
+
+ def __init__(self):
+ self.dataTypePatterns = {
+ DataType.NUMBERS: [
+ r'\b(numbers?|digits?|count|list|sequence)\b',
+ r'\b(prime|fibonacci|random|even|odd)\s+(numbers?)\b',
+ r'\b(calculate|compute|generate)\s+(numbers?)\b',
+ r'\b(first|last)\s+\d+\s+(numbers?)\b'
+ ],
+ DataType.TEXT: [
+ r'\b(text|content|words?|sentences?|paragraphs?)\b',
+ r'\b(write|create|generate)\s+(text|content)\b',
+ r'\b(summary|description|explanation)\b',
+ r'\b(article|essay|report)\b'
+ ],
+ DataType.DOCUMENTS: [
+ r'\b(document|file|report|pdf|word|excel)\b',
+ r'\b(create|generate|make)\s+(document|file|report)\b',
+ r'\b(format|structure|organize)\s+(document)\b',
+ r'\b(presentation|slides?)\b'
+ ],
+ DataType.ANALYSIS: [
+ r'\b(analyze|analysis|examine|study|evaluate)\b',
+ r'\b(insights?|findings?|results?)\b',
+ r'\b(compare|contrast|evaluate)\b',
+ r'\b(trends?|patterns?)\b'
+ ],
+ DataType.CODE: [
+ r'\b(code|program|script|algorithm|function)\b',
+ r'\b(write|create|develop)\s+(code|program|script)\b',
+ r'\b(implement|build|construct)\b',
+ r'\b(debug|fix|optimize)\s+(code)\b'
+ ]
+ }
+
+ self.formatPatterns = {
+ ExpectedFormat.RAW_DATA: [
+ r'\b(raw|plain|simple|basic)\b',
+ r'\b(numbers?|data|list)\b(?!\s+(in|as|with))',
+ r'\b(just|only)\s+(numbers?|data)\b'
+ ],
+ ExpectedFormat.FORMATTED: [
+ r'\b(formatted|structured|organized|presented)\b',
+ r'\b(table|chart|graph|visual)\b',
+ r'\b(pretty|nice|clean)\s+(format|presentation)\b',
+ r'\b(professional|polished)\b'
+ ],
+ ExpectedFormat.STRUCTURED: [
+ r'\b(json|xml|csv|structured)\b',
+ r'\b(organized|categorized|grouped)\b',
+ r'\b(systematic|methodical)\b',
+ r'\b(database|spreadsheet)\b'
+ ]
+ }
+
+ def analyzeUserIntent(self, userPrompt: str, context: Any) -> Dict[str, Any]:
+ """Analyzes user intent from prompt and context"""
+ try:
+ # Extract primary goal
+ primaryGoal = self._extractPrimaryGoal(userPrompt)
+
+ # Classify data type
+ dataType = self._classifyDataType(userPrompt)
+
+ # Determine expected format
+ expectedFormat = self._determineExpectedFormat(userPrompt)
+
+ # Assess quality requirements
+ qualityRequirements = self._assessQualityRequirements(userPrompt, context)
+
+ # Extract success criteria
+ successCriteria = self._extractSuccessCriteria(userPrompt, context)
+
+ # Calculate confidence score
+ confidenceScore = self._calculateConfidenceScore(dataType, expectedFormat, successCriteria)
+
+ return {
+ "primaryGoal": primaryGoal,
+ "dataType": dataType.value,
+ "expectedFormat": expectedFormat.value,
+ "qualityRequirements": qualityRequirements,
+ "successCriteria": successCriteria,
+ "confidenceScore": confidenceScore
+ }
+
+ except Exception as e:
+ logger.error(f"Error analyzing user intent: {str(e)}")
+ return self._createDefaultIntentAnalysis(userPrompt)
+
+ def _extractPrimaryGoal(self, userPrompt: str) -> str:
+ """Extracts the primary goal from user prompt"""
+ # Simple extraction - can be enhanced
+ return userPrompt.strip()
+
+ def _classifyDataType(self, userPrompt: str) -> DataType:
+ """Classifies the type of data the user wants"""
+ promptLower = userPrompt.lower()
+
+ for dataType, patterns in self.dataTypePatterns.items():
+ for pattern in patterns:
+ if re.search(pattern, promptLower):
+ return dataType
+
+ return DataType.UNKNOWN
+
+ def _determineExpectedFormat(self, userPrompt: str) -> ExpectedFormat:
+ """Determines the expected format of the output"""
+ promptLower = userPrompt.lower()
+
+ for formatType, patterns in self.formatPatterns.items():
+ for pattern in patterns:
+ if re.search(pattern, promptLower):
+ return formatType
+
+ return ExpectedFormat.UNKNOWN
+
+ def _assessQualityRequirements(self, userPrompt: str, context: Any) -> Dict[str, Any]:
+ """Assesses quality requirements from prompt and context"""
+ promptLower = userPrompt.lower()
+
+ # Check for accuracy requirements
+ accuracyThreshold = 0.8
+ if any(word in promptLower for word in ['exact', 'precise', 'accurate', 'correct']):
+ accuracyThreshold = 0.95
+ elif any(word in promptLower for word in ['approximate', 'rough', 'estimate']):
+ accuracyThreshold = 0.7
+
+ # Check for completeness requirements
+ completenessThreshold = 0.8
+ if any(word in promptLower for word in ['complete', 'full', 'comprehensive', 'all']):
+ completenessThreshold = 0.95
+ elif any(word in promptLower for word in ['summary', 'brief', 'overview']):
+ completenessThreshold = 0.6
+
+ # Check for format requirements
+ formatRequirement = "any"
+ if any(word in promptLower for word in ['formatted', 'structured', 'organized']):
+ formatRequirement = "formatted"
+ elif any(word in promptLower for word in ['raw', 'plain', 'simple']):
+ formatRequirement = "raw"
+
+ return {
+ "accuracyThreshold": accuracyThreshold,
+ "completenessThreshold": completenessThreshold,
+ "formatRequirement": formatRequirement
+ }
+
+ def _extractSuccessCriteria(self, userPrompt: str, context: Any) -> List[str]:
+ """Extracts success criteria from prompt and context"""
+ criteria = []
+ promptLower = userPrompt.lower()
+
+ # Extract explicit criteria
+ if 'first' in promptLower and 'numbers' in promptLower:
+ criteria.append("Contains the first N numbers as requested")
+
+ if 'prime' in promptLower:
+ criteria.append("Contains actual prime numbers, not code to generate them")
+
+ if 'document' in promptLower:
+ criteria.append("Creates a properly formatted document")
+
+ if 'format' in promptLower:
+ criteria.append("Content is properly formatted as requested")
+
+ # Add context-based criteria
+ if hasattr(context, 'task_step') and context.task_step:
+ taskObjective = context.task_step.objective.lower()
+ if 'word' in taskObjective:
+ criteria.append("Creates a Word document")
+ if 'excel' in taskObjective:
+ criteria.append("Creates an Excel spreadsheet")
+
+ return criteria if criteria else ["Delivers what the user requested"]
+
+ def _calculateConfidenceScore(self, dataType: DataType, expectedFormat: ExpectedFormat,
+ successCriteria: List[str]) -> float:
+ """Calculates confidence score for the intent analysis"""
+ score = 0.0
+
+ # Data type confidence
+ if dataType != DataType.UNKNOWN:
+ score += 0.3
+
+ # Format confidence
+ if expectedFormat != ExpectedFormat.UNKNOWN:
+ score += 0.2
+
+ # Success criteria confidence
+ if len(successCriteria) > 0:
+ score += 0.3
+
+ # Additional confidence for specific patterns
+ if len(successCriteria) > 1:
+ score += 0.2
+
+ return min(score, 1.0)
+
+ def _createDefaultIntentAnalysis(self, userPrompt: str) -> Dict[str, Any]:
+ """Creates a default intent analysis when analysis fails"""
+ return {
+ "primaryGoal": userPrompt,
+ "dataType": "unknown",
+ "expectedFormat": "unknown",
+ "qualityRequirements": {
+ "accuracyThreshold": 0.8,
+ "completenessThreshold": 0.8,
+ "formatRequirement": "any"
+ },
+ "successCriteria": ["Delivers what the user requested"],
+ "confidenceScore": 0.1
+ }
diff --git a/modules/workflows/processing/adaptive/learningEngine.py b/modules/workflows/processing/adaptive/learningEngine.py
new file mode 100644
index 00000000..2d5836a6
--- /dev/null
+++ b/modules/workflows/processing/adaptive/learningEngine.py
@@ -0,0 +1,166 @@
+# learningEngine.py
+# Learning engine for adaptive React mode
+
+import json
+import logging
+from typing import Dict, Any, List
+from datetime import datetime, timezone
+
+logger = logging.getLogger(__name__)
+
class LearningEngine:
    """Learns from feedback and adapts future behavior.

    Strategies are keyed by "<dataType>_<expectedFormat>". Each strategy
    records which actions succeeded or failed plus a running success-rate
    estimate that is nudged up/down by feedback.
    """

    # Per-dataType (recommendedPrompt, avoidPrompt) templates used when a
    # new default strategy is created. Unknown data types fall back to
    # _DEFAULT_HINT and a strategyId prefixed with "unknown".
    _PROMPT_HINTS = {
        'numbers': (
            "Deliver {dataType} data in {expectedFormat} format. Provide actual numbers, not code to generate them.",
            "Do not ask AI to write code when user wants data. Deliver the data directly.",
        ),
        'text': (
            "Generate {dataType} content in {expectedFormat} format.",
            "Ensure content is readable and well-structured.",
        ),
        'documents': (
            "Create {dataType} in {expectedFormat} format with proper structure.",
            "Ensure document is properly formatted and organized.",
        ),
    }
    _DEFAULT_HINT = (
        "Deliver {dataType} content in {expectedFormat} format.",
        "Ensure content matches user requirements.",
    )

    def __init__(self):
        # strategyKey -> strategy dict (see _createDefaultStrategy)
        self.strategies = {}
        # Chronological record of every feedback item received
        self.feedbackHistory = []

    def learnFromFeedback(self, feedback: Dict[str, Any], context: Any, intent: Dict[str, Any]):
        """Learns from feedback and updates strategies.

        Stores the feedback (with a serialized snapshot of the context)
        and reinforces or penalizes the matching strategy. Errors are
        logged, never propagated, so learning is strictly best-effort.
        """
        try:
            self.feedbackHistory.append({
                "feedback": feedback,
                "context": self._serializeContext(context),
                "intent": intent,
                "timestamp": datetime.now(timezone.utc).timestamp()
            })

            self._updateStrategies(feedback, intent)

            logger.info(f"Learning from feedback: {feedback.get('actionAttempted', 'unknown')} - "
                        f"Quality: {feedback.get('qualityScore', 0):.2f}, Intent Match: {feedback.get('intentMatchScore', 0):.2f}")

        except Exception as e:
            logger.error(f"Error learning from feedback: {str(e)}")

    def getImprovedStrategy(self, context: Any, intent: Dict[str, Any]) -> Dict[str, Any]:
        """Returns the learned strategy for this intent, creating and
        caching a default strategy on first sight of the intent key."""
        try:
            strategyKey = self._getStrategyKey(intent)

            if strategyKey in self.strategies:
                strategy = self.strategies[strategyKey]
                logger.info(f"Using learned strategy for {strategyKey}: {strategy}")
                return strategy

            defaultStrategy = self._createDefaultStrategy(intent)
            self.strategies[strategyKey] = defaultStrategy
            logger.info(f"Created default strategy for {strategyKey}")
            return defaultStrategy

        except Exception as e:
            logger.error(f"Error getting improved strategy: {str(e)}")
            return self._createDefaultStrategy(intent)

    def _updateStrategies(self, feedback: Dict[str, Any], intent: Dict[str, Any]):
        """Updates the strategy matching *intent* based on *feedback*.

        Both quality and intent-match above 0.7 reinforce the action
        (+0.1 success rate); either below 0.3 penalizes it (-0.1).
        Scores in between leave the strategy unchanged apart from the
        lastModified stamp.
        """
        strategyKey = self._getStrategyKey(intent)
        actionAttempted = feedback.get('actionAttempted', 'unknown')
        qualityScore = feedback.get('qualityScore', 0)
        intentMatchScore = feedback.get('intentMatchScore', 0)

        if strategyKey not in self.strategies:
            self.strategies[strategyKey] = self._createDefaultStrategy(intent)

        strategy = self.strategies[strategyKey]

        if qualityScore > 0.7 and intentMatchScore > 0.7:
            # Successful action - reinforce it
            strategy.setdefault('successfulActions', [])
            if actionAttempted not in strategy['successfulActions']:
                strategy['successfulActions'].append(actionAttempted)
            strategy['successRate'] = min(strategy.get('successRate', 0.5) + 0.1, 1.0)
            logger.info(f"Reinforced successful action: {actionAttempted}")

        elif qualityScore < 0.3 or intentMatchScore < 0.3:
            # Failed action - avoid it
            strategy.setdefault('failedActions', [])
            if actionAttempted not in strategy['failedActions']:
                strategy['failedActions'].append(actionAttempted)
            strategy['successRate'] = max(strategy.get('successRate', 0.5) - 0.1, 0.0)
            logger.info(f"Marked failed action to avoid: {actionAttempted}")

        strategy['lastModified'] = datetime.now(timezone.utc).timestamp()

    def _getStrategyKey(self, intent: Dict[str, Any]) -> str:
        """Gets strategy key ("<dataType>_<expectedFormat>") for an intent."""
        dataType = intent.get('dataType', 'unknown')
        expectedFormat = intent.get('expectedFormat', 'unknown')
        return f"{dataType}_{expectedFormat}"

    def _createDefaultStrategy(self, intent: Dict[str, Any]) -> Dict[str, Any]:
        """Creates a default strategy for the intent.

        Consolidates the previous four copy-pasted branches: the shape of
        the strategy is identical for every data type; only the prompt
        hints and the strategyId prefix differ.
        """
        dataType = intent.get('dataType', 'unknown')
        expectedFormat = intent.get('expectedFormat', 'unknown')
        recommended, avoid = self._PROMPT_HINTS.get(dataType, self._DEFAULT_HINT)
        # Unrecognized data types keep the historical "unknown_" id prefix.
        idPrefix = dataType if dataType in self._PROMPT_HINTS else 'unknown'
        return {
            'strategyId': f"{idPrefix}_{expectedFormat}",
            'successfulActions': [],
            'failedActions': [],
            'successRate': 0.5,
            'lastModified': datetime.now(timezone.utc).timestamp(),
            'recommendedPrompt': recommended.format(dataType=dataType, expectedFormat=expectedFormat),
            'avoidPrompt': avoid,
        }

    def _serializeContext(self, context: Any) -> Dict[str, Any]:
        """Serializes context for storage.

        Bug fix: context.task_step is an object exposing an `.objective`
        attribute (see intentAnalyzer), not a dict - the old `.get()` call
        raised AttributeError and the whole context was silently dropped
        to {}. Read the attribute with getattr instead.
        """
        try:
            taskStep = getattr(context, 'task_step', None)
            return {
                "taskObjective": getattr(taskStep, 'objective', '') if taskStep else '',
                "workflowId": getattr(context, 'workflow_id', ''),
                "availableDocuments": getattr(context, 'available_documents', [])
            }
        except Exception:
            # Best-effort snapshot only; never let serialization break learning.
            return {}

    def getLearningSummary(self) -> Dict[str, Any]:
        """Gets a summary of what has been learned so far."""
        return {
            "totalStrategies": len(self.strategies),
            "totalFeedback": len(self.feedbackHistory),
            "strategies": list(self.strategies.keys()),
            "averageSuccessRate": sum(s.get('successRate', 0) for s in self.strategies.values()) / max(len(self.strategies), 1)
        }
diff --git a/modules/workflows/processing/adaptive/progressTracker.py b/modules/workflows/processing/adaptive/progressTracker.py
new file mode 100644
index 00000000..80dfcf63
--- /dev/null
+++ b/modules/workflows/processing/adaptive/progressTracker.py
@@ -0,0 +1,142 @@
+# progressTracker.py
+# Progress tracking for adaptive React mode
+
+import logging
+from typing import Dict, Any, List
+from datetime import datetime, timezone
+
+logger = logging.getLogger(__name__)
+
class ProgressTracker:
    """Tracks what has been accomplished and what's still needed.

    Outcomes are bucketed by quality score: completed (validated success
    with score > 0.7), partial (score > 0.3), or failed (everything else).
    Improvement suggestions are accumulated as learning insights.
    """

    def __init__(self):
        self.completedObjectives = []
        self.partialAchievements = []
        self.failedAttempts = []
        self.learningInsights = []
        self.currentPhase = "planning"

    def updateProgress(self, result: Any, validation: Dict[str, Any], intent: Dict[str, Any]):
        """Classify an action outcome into the matching bucket and set the
        current phase accordingly; collects new improvement suggestions."""
        try:
            succeeded = validation.get('overallSuccess', False)
            score = validation.get('qualityScore', 0)
            suggestions = validation.get('improvementSuggestions', [])
            goal = intent.get('primaryGoal', 'Unknown')
            stamp = datetime.now(timezone.utc).timestamp()

            if succeeded and score > 0.7:
                # Fully validated, high-quality outcome.
                self.completedObjectives.append({
                    "objective": goal,
                    "achievement": f"Quality score: {score:.2f}",
                    "qualityScore": score,
                    "timestamp": stamp
                })
                self.currentPhase = "completed"
                logger.info(f"Objective completed: {goal}")
            elif score > 0.3:
                # Something usable was produced, but gaps remain.
                self.partialAchievements.append({
                    "objective": goal,
                    "partialAchievement": f"Quality score: {score:.2f}",
                    "missingParts": suggestions,
                    "timestamp": stamp
                })
                self.currentPhase = "partial"
                logger.info(f"Partial achievement: {goal}")
            else:
                # Low quality - treat as a failed attempt to learn from.
                self.failedAttempts.append({
                    "objective": goal,
                    "failureReason": f"Quality score: {score:.2f}",
                    "learningOpportunity": suggestions,
                    "timestamp": stamp
                })
                self.currentPhase = "failed"
                logger.info(f"Failed attempt: {goal}")

            # Collect new, deduplicated improvement suggestions.
            for hint in suggestions:
                if hint not in self.learningInsights:
                    self.learningInsights.append(hint)

        except Exception as e:
            logger.error(f"Error updating progress: {str(e)}")

    def getCurrentProgress(self) -> Dict[str, Any]:
        """Snapshot of the current progress state plus next-action hints."""
        return {
            "completedObjectives": self.completedObjectives,
            "partialAchievements": self.partialAchievements,
            "failedAttempts": self.failedAttempts,
            "learningInsights": self.learningInsights,
            "currentPhase": self.currentPhase,
            "nextActionsSuggested": self._getNextActionSuggestions()
        }

    def shouldContinue(self, progress: Dict[str, Any], validation: Dict[str, Any]) -> bool:
        """Stop when something completed, after 3 failed attempts, or on a
        validated success; otherwise keep going. Errors default to True."""
        try:
            alreadyDone = bool(progress.get('completedObjectives'))
            tooManyFailures = len(progress.get('failedAttempts', [])) >= 3
            validated = validation.get('overallSuccess', False)
            return not (alreadyDone or tooManyFailures or validated)
        except Exception as e:
            logger.error(f"Error checking if should continue: {str(e)}")
            return True

    def _getNextActionSuggestions(self) -> List[str]:
        """Build next-action hints from history; generic fallbacks if empty."""
        hints = []
        if self.failedAttempts:
            hints.append("Avoid actions that have failed before")
        if self.partialAchievements:
            hints.append("Build on partial achievements")
        if self.learningInsights:
            hints.extend(self.learningInsights[:3])  # top 3 insights only
        return hints or ["Try a different approach", "Focus on user intent"]

    def getProgressSummary(self) -> Dict[str, Any]:
        """Aggregate counts and a success rate over completed vs failed."""
        done, failed = len(self.completedObjectives), len(self.failedAttempts)
        return {
            "totalCompleted": done,
            "totalPartial": len(self.partialAchievements),
            "totalFailed": failed,
            "totalInsights": len(self.learningInsights),
            "currentPhase": self.currentPhase,
            "successRate": done / max(done + failed, 1)
        }

    def reset(self):
        """Clears all history and returns the tracker to the planning phase."""
        self.completedObjectives = []
        self.partialAchievements = []
        self.failedAttempts = []
        self.learningInsights = []
        self.currentPhase = "planning"
diff --git a/modules/workflows/processing/core/__init__.py b/modules/workflows/processing/core/__init__.py
new file mode 100644
index 00000000..f2575ed7
--- /dev/null
+++ b/modules/workflows/processing/core/__init__.py
@@ -0,0 +1 @@
+# Core workflow processing modules
diff --git a/modules/workflows/processing/core/actionExecutor.py b/modules/workflows/processing/core/actionExecutor.py
new file mode 100644
index 00000000..f865ce50
--- /dev/null
+++ b/modules/workflows/processing/core/actionExecutor.py
@@ -0,0 +1,258 @@
+# actionExecutor.py
+# Action execution functionality for workflows
+
+import logging
+from typing import Dict, Any, List
+from modules.datamodels.datamodelWorkflow import ActionResult, TaskAction, TaskStep
+from modules.datamodels.datamodelChat import ChatWorkflow
+from modules.workflows.processing.shared.promptFactory import methods
+
+logger = logging.getLogger(__name__)
+
class ActionExecutor:
    """Handles execution of workflow actions.

    Dispatches method/action names through the shared `methods` registry,
    records trace logs, and reports results back as chat messages.
    """

    def __init__(self, services):
        # Service container: interfaceDbChat, generation, utils, workflow, ...
        self.services = services

    def _checkWorkflowStopped(self, workflow):
        """Check if workflow has been stopped by user and raise if so.

        Reads the live status from the database so a stop issued after the
        in-memory object was loaded is still honored; falls back to the
        in-memory object when the lookup fails.

        Bug fix: the previous version raised the stop exception inside its
        own try block, so its own `except Exception` caught it - a stop
        recorded in the database was silently swallowed whenever the
        in-memory object was stale. The raise now happens outside the try.
        """
        stopped = False
        try:
            # Fetch the current workflow status from the database to avoid stale data
            current_workflow = self.services.interfaceDbChat.getWorkflow(workflow.id)
            stopped = bool(current_workflow and current_workflow.status == "stopped")
        except Exception as e:
            # Database lookup failed - fall back to the in-memory object
            logger.warning(f"Could not check current workflow status from database: {str(e)}")
            stopped = bool(workflow and workflow.status == "stopped")
        if stopped:
            logger.info("Workflow stopped by user, aborting action execution")
            raise Exception("Workflow was stopped by user")

    async def executeAction(self, methodName: str, actionName: str, parameters: Dict[str, Any]) -> ActionResult:
        """Execute a registered method action.

        Raises ValueError for unknown method or action names; any error
        from the action itself is logged and re-raised.
        """
        try:
            if methodName not in methods:
                raise ValueError(f"Unknown method: {methodName}")

            method = methods[methodName]
            if actionName not in method['actions']:
                raise ValueError(f"Unknown action: {actionName} for method {methodName}")

            action = method['actions'][actionName]

            # Execute the action coroutine with the supplied parameters
            return await action['method'](parameters)

        except Exception as e:
            logger.error(f"Error executing method {methodName}.{actionName}: {str(e)}")
            raise

    async def executeSingleAction(self, action: TaskAction, workflow: ChatWorkflow, taskStep: TaskStep,
                                  taskIndex: int = None, actionIndex: int = None, totalActions: int = None) -> ActionResult:
        """Execute a single action and return an ActionResult.

        Checks the stop flag before running, traces inputs and outputs,
        updates the action's success/error state, creates the user-facing
        completion message, and always returns an ActionResult (an error
        result rather than an exception on failure).
        """
        try:
            # Honor a user stop before doing any work
            self._checkWorkflowStopped(workflow)

            # Use passed indices or fall back to '?' in log labels
            taskNum = taskIndex if taskIndex is not None else '?'
            actionNum = actionIndex if actionIndex is not None else '?'

            logger.info(f"=== TASK {taskNum} ACTION {actionNum}: {action.execMethod}.{action.execAction} ===")

            # Log input parameters
            inputDocs = action.execParameters.get('documentList', [])
            inputConnections = action.execParameters.get('connections', [])
            logger.info(f"Input documents: {inputDocs} (type: {type(inputDocs)})")
            if inputConnections:
                logger.info(f"Input connections: {inputConnections}")

            # Log all action parameters for debugging
            logger.info(f"All action parameters: {action.execParameters}")

            enhancedParameters = action.execParameters.copy()
            if action.expectedDocumentFormats:
                enhancedParameters['expectedDocumentFormats'] = action.expectedDocumentFormats
                logger.info(f"Expected formats: {action.expectedDocumentFormats}")

            # Re-check: the user may have stopped while parameters were prepared
            self._checkWorkflowStopped(workflow)

            result = await self.executeAction(
                methodName=action.execMethod,
                actionName=action.execAction,
                parameters=enhancedParameters
            )
            resultLabel = action.execResultLabel

            # Trace action result with full document metadata
            actionResultTrace = {
                "method": action.execMethod,
                "action": action.execAction,
                "success": result.success,
                "error": result.error,
                "resultLabel": resultLabel,
                "documentsCount": len(result.documents) if result.documents else 0
            }

            if result.documents:
                actionResultTrace["documents"] = []
                for doc in result.documents:
                    docMetadata = {
                        "name": getattr(doc, 'documentName', 'Unknown'),
                        "mimeType": getattr(doc, 'mimeType', 'Unknown'),
                        "size": getattr(doc, 'size', 'Unknown'),
                        "created": getattr(doc, 'created', 'Unknown'),
                        "modified": getattr(doc, 'modified', 'Unknown'),
                        "typeGroup": getattr(doc, 'typeGroup', 'Unknown'),
                        "documentId": getattr(doc, 'documentId', 'Unknown'),
                        "reference": getattr(doc, 'reference', 'Unknown')
                    }
                    # Drop 'Unknown' placeholders to keep the trace clean
                    docMetadata = {k: v for k, v in docMetadata.items() if v != 'Unknown'}
                    actionResultTrace["documents"].append(docMetadata)

            self._writeTraceLog("Action Result", actionResultTrace)

            # Process action result
            if result.success:
                action.setSuccess()
                # Extract result text from ALL documents
                action.result = self._extractResultText(result)
                # execResultLabel is owned by the action handler; action
                # methods must not override it (it drives document routing)
                if not action.execResultLabel:
                    logger.warning(f"Action {action.execMethod}.{action.execAction} has no execResultLabel set")

                logger.info("Action completed successfully")

                if result.documents:
                    logger.info(f"Output documents ({len(result.documents)}):")
                    for i, doc in enumerate(result.documents):
                        logger.info(f"  {i+1}. {doc.documentName}")
                else:
                    logger.info("Output: No documents created")
            else:
                action.setError(result.error or "Action execution failed")
                logger.error(f"Action failed: {result.error}")

                # Create database log entry for action failure
                self.services.interfaceDbChat.createLog({
                    "workflowId": workflow.id,
                    "message": f"β **Task {taskNum}**\n\nβ **Action {actionNum}/{totalActions}** failed: {result.error}",
                    "type": "error"
                })

            logger.info(f"=== TASK {taskNum} ACTION {actionNum} COMPLETED ===")

            # Create action completion message with documents (generic)
            await self._createActionCompletionMessage(action, result, workflow, taskStep, taskIndex, actionIndex, totalActions)

            return ActionResult(
                success=result.success,
                documents=result.documents,  # Return original ActionDocument objects
                resultLabel=action.execResultLabel,  # Always use action's execResultLabel
                error=result.error or ""
            )
        except Exception as e:
            logger.error(f"Error executing single action: {str(e)}")
            action.setError(str(e))
            return ActionResult(
                success=False,
                documents=[],  # Empty documents for error case
                resultLabel=action.execResultLabel,
                error=str(e)
            )

    def _extractResultText(self, result: ActionResult) -> str:
        """Concatenate the documentData of all result documents.

        Returns "" for failed results or when no document carries data;
        multiple documents are joined with a '---' separator.
        """
        if not result.success or not result.documents:
            return ""

        resultParts = []
        for doc in result.documents:
            if hasattr(doc, 'documentData') and doc.documentData:
                resultParts.append(str(doc.documentData))

        return "\n\n---\n\n".join(resultParts) if resultParts else ""

    async def _createActionCompletionMessage(self, action: TaskAction, result: ActionResult, workflow: ChatWorkflow,
                                             taskStep: TaskStep, taskIndex: int, actionIndex: int, totalActions: int):
        """Create the user-facing completion message for an action.

        Best-effort: message-creation errors are logged, never raised, so
        they cannot fail an otherwise successful action.
        """
        try:
            # Convert ActionDocument objects to ChatDocument objects for the message
            createdDocuments = []
            if result.documents:
                createdDocuments = self.services.generation.createDocumentsFromActionResult(result, action, workflow, None)

            # Imported lazily to avoid a circular import with messageCreator
            from modules.workflows.processing.core.messageCreator import MessageCreator
            messageCreator = MessageCreator(self.services)

            await messageCreator.createActionMessage(
                action=action,
                result=result,
                workflow=workflow,
                resultLabel=action.execResultLabel,
                createdDocuments=createdDocuments,
                taskStep=taskStep,
                taskIndex=taskIndex,
                actionIndex=actionIndex,
                totalActions=totalActions
            )
        except Exception as e:
            logger.error(f"Error creating action completion message: {str(e)}")

    def _writeTraceLog(self, contextText: str, data: Any) -> None:
        """Append trace data to the configured trace file (debug mode only).

        Silently does nothing when the logger is above DEBUG or when any
        filesystem error occurs (no logging in the error path to avoid
        recursion through the logging handlers).
        """
        try:
            import os
            import json
            # timezone.utc for consistency with the other processing modules
            from datetime import datetime, timezone

            if logger.level > logging.DEBUG:
                return

            # Resolve the log directory from configuration
            logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
            if not os.path.isabs(logDir):
                # Relative paths are resolved against the gateway directory
                gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
                logDir = os.path.join(gatewayDir, logDir)

            os.makedirs(logDir, exist_ok=True)
            traceFile = os.path.join(logDir, "log_trace.log")

            # Millisecond-precision timestamp for the trace entry header
            timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), timezone.utc).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
            traceEntry = f"[{timestamp}] {contextText}\n"

            # Show full content without truncation
            if data is not None:
                if isinstance(data, (dict, list)):
                    # ensure_ascii=False preserves Unicode; indent=2 for readability
                    traceEntry += f"Data: {json.dumps(data, indent=2, default=str, ensure_ascii=False)}\n"
                else:
                    traceEntry += f"Data: {str(data)}\n"

            traceEntry += "-" * 80 + "\n\n"

            with open(traceFile, "a", encoding="utf-8") as f:
                f.write(traceEntry)

        except Exception:
            # Intentionally silent: tracing must never break execution,
            # and logging here could recurse into the trace writer
            pass
diff --git a/modules/workflows/processing/core/messageCreator.py b/modules/workflows/processing/core/messageCreator.py
new file mode 100644
index 00000000..ce4cabdf
--- /dev/null
+++ b/modules/workflows/processing/core/messageCreator.py
@@ -0,0 +1,361 @@
+# messageCreator.py
+# Generic message creation for all workflow phases
+
+import logging
+from typing import Dict, Any, Optional, List
+from modules.datamodels.datamodelWorkflow import TaskPlan, TaskStep, ActionResult, ReviewResult
+from modules.datamodels.datamodelChat import ChatWorkflow
+
+logger = logging.getLogger(__name__)
+
class MessageCreator:
    """Handles creation of all workflow chat messages.

    Every creator method checks the stop flag first and is best-effort:
    failures are logged, never raised to the caller (except the deliberate
    stop exception).
    """

    def __init__(self, services):
        # Service container: interfaceDbChat, utils, workflow, ...
        self.services = services

    def _checkWorkflowStopped(self, workflow):
        """Check if workflow has been stopped by user and raise if so.

        Bug fix: the previous version raised the stop exception inside its
        own try block, so its own `except Exception` caught it - a stop
        recorded in the database was silently swallowed whenever the
        in-memory object was stale. The raise now happens outside the try.
        """
        stopped = False
        try:
            # Fetch the current workflow status from the database to avoid stale data
            current_workflow = self.services.interfaceDbChat.getWorkflow(workflow.id)
            stopped = bool(current_workflow and current_workflow.status == "stopped")
        except Exception as e:
            # Database lookup failed - fall back to the in-memory object
            logger.warning(f"Could not check current workflow status from database: {str(e)}")
            stopped = bool(workflow and workflow.status == "stopped")
        if stopped:
            logger.info("Workflow stopped by user, aborting message creation")
            raise Exception("Workflow was stopped by user")

    async def createTaskPlanMessage(self, taskPlan: TaskPlan, workflow: ChatWorkflow):
        """Create a chat message summarizing the task plan with the
        user-friendly messages from the plan and its tasks."""
        try:
            self._checkWorkflowStopped(workflow)

            # Build task plan summary
            taskSummary = f"π **Task Plan**\n\n"

            # Overall user message from the plan, if any
            overallMessage = taskPlan.userMessage
            if overallMessage:
                taskSummary += f"{overallMessage}\n\n"

            # Add each task's user message (tasks without one add only spacing)
            for task in taskPlan.tasks:
                if task.userMessage:
                    taskSummary += f"π¬ {task.userMessage}\n"
                taskSummary += "\n"

            messageData = {
                "workflowId": workflow.id,
                "role": "assistant",
                "message": taskSummary,
                "status": "step",
                "sequenceNr": len(workflow.messages) + 1,
                "publishedAt": self.services.utils.getUtcTimestamp(),
                "documentsLabel": "task_plan",
                "documents": [],
                # Use current workflow round instead of hardcoded 1
                "roundNumber": workflow.currentRound,
                # Task plan precedes individual tasks; 1 keeps the UI from filtering it
                "taskNumber": 1,
                "actionNumber": 0,
                "taskProgress": "pending"
            }

            message = self.services.interfaceDbChat.createMessage(messageData)
            if message:
                workflow.messages.append(message)
                logger.info("Task plan message created successfully")
        except Exception as e:
            logger.error(f"Error creating task plan message: {str(e)}")

    async def createTaskStartMessage(self, taskStep: TaskStep, workflow: ChatWorkflow, taskIndex: int, totalTasks: int):
        """Create a task start message for the user."""
        try:
            self._checkWorkflowStopped(workflow)

            # "N/M" when the total is known, otherwise just "N"
            taskProgress = f"{taskIndex}/{totalTasks}" if totalTasks is not None else str(taskIndex)
            taskStartMessage = {
                "workflowId": workflow.id,
                "role": "assistant",
                "message": f"π **Task {taskProgress}**",
                "status": "step",
                "sequenceNr": len(workflow.messages) + 1,
                "publishedAt": self.services.utils.getUtcTimestamp(),
                "documentsLabel": f"task_{taskIndex}_start",
                "documents": [],
                "roundNumber": workflow.currentRound,
                "taskNumber": taskIndex,
                "actionNumber": 0,
                "taskProgress": "running"
            }

            # Append the user-friendly description when available
            if taskStep.userMessage:
                taskStartMessage["message"] += f"\n\n㪠{taskStep.userMessage}"

            message = self.services.interfaceDbChat.createMessage(taskStartMessage)
            if message:
                workflow.messages.append(message)
                logger.info(f"Task start message created for task {taskIndex}")
        except Exception as e:
            logger.error(f"Error creating task start message: {str(e)}")

    async def createActionMessage(self, action, result: ActionResult, workflow: ChatWorkflow, resultLabel: str = None,
                                  createdDocuments: List = None, taskStep: TaskStep = None,
                                  taskIndex: int = None, actionIndex: int = None, totalActions: int = None):
        """Create and store a message for an action result.

        Round/task/action numbers are extracted from resultLabel (format
        'round{r}_task{t}_action{a}_...') when possible, falling back to the
        explicit indices and then the workflow context, so the message
        matches the document reference. Returns the created message or None.
        """
        try:
            self._checkWorkflowStopped(workflow)

            if resultLabel is None:
                resultLabel = action.execResultLabel

            # Log delivered documents
            if createdDocuments:
                logger.info(f"Result label: {resultLabel} - {len(createdDocuments)} documents")
            else:
                logger.info(f"Result label: {resultLabel} - No documents")

            # Current workflow context and stats for fallback numbering
            workflowContext = self.services.workflow.getWorkflowContext()
            workflowStats = self.services.workflow.getWorkflowStats()

            taskObjective = taskStep.objective if taskStep else 'Unknown task'

            # Prefer numbers embedded in resultLabel, then explicit indices, then context
            currentRound = self._extractRoundNumberFromLabel(resultLabel) if resultLabel else workflowContext.get('currentRound', 0)
            currentTask = self._extractTaskNumberFromLabel(resultLabel) if resultLabel else (taskIndex if taskIndex is not None else workflowContext.get('currentTask', 0))
            totalTasks = workflowStats.get('totalTasks', 0)
            currentAction = self._extractActionNumberFromLabel(resultLabel) if resultLabel else (actionIndex if actionIndex is not None else workflowContext.get('currentAction', 0))
            totalActions = totalActions if totalActions is not None else workflowStats.get('totalActions', 0)

            # Debug logging for round number extraction
            logger.info(f"Action message round number extraction: resultLabel='{resultLabel}', extractedRound={currentRound}, workflowRound={workflowContext.get('currentRound', 0)}")

            # Build a user-friendly message based on success/failure
            if result.success:
                messageText = f"**Action {currentAction}/{totalActions} ({action.execMethod}.{action.execAction})**\n\n"
                messageText += f"β {taskObjective}\n\n"
            else:
                # Failure message - surface error details to the user
                errorDetails = result.error if result.error else "Unknown error occurred"
                messageText = f"**Action {currentAction}/{totalActions} ({action.execMethod}.{action.execAction})**\n\n"
                messageText += f"β {taskObjective}\n\n"
                messageText += f"{errorDetails}\n\n"

            messageData = {
                "workflowId": workflow.id,
                "role": "assistant",
                "message": messageText,
                "status": "step",
                "sequenceNr": len(workflow.messages) + 1,
                "publishedAt": self.services.utils.getUtcTimestamp(),
                "actionId": action.id,
                "actionMethod": action.execMethod,
                "actionName": action.execAction,
                "documentsLabel": resultLabel,
                "documents": createdDocuments,
                # Numbers extracted from resultLabel to match the document reference
                "roundNumber": currentRound,
                "taskNumber": currentTask,
                "actionNumber": currentAction,
                "actionProgress": "success" if result.success else "fail"
            }

            # Extra debugging for error messages
            if not result.success:
                logger.info(f"Creating ERROR message: {messageText}")
                logger.info(f"Message data: {messageData}")

            message = self.services.interfaceDbChat.createMessage(messageData)
            if message:
                workflow.messages.append(message)
                logger.info(f"Message created: {action.execMethod}.{action.execAction}")
                return message
            else:
                logger.error(f"Failed to create workflow message for action {action.execMethod}.{action.execAction}")
                return None
        except Exception as e:
            logger.error(f"Error creating action message: {str(e)}")
            return None

    async def createTaskCompletionMessage(self, taskStep: TaskStep, workflow: ChatWorkflow, taskIndex: int,
                                          totalTasks: int, reviewResult: ReviewResult):
        """Create a task completion message including met criteria and score."""
        try:
            self._checkWorkflowStopped(workflow)

            taskProgress = f"{taskIndex}/{totalTasks}" if totalTasks is not None else str(taskIndex)

            # Enhanced completion message with criteria details
            completionMessage = f"π― **Task {taskProgress}**\n\nβ {reviewResult.reason or 'Task completed successfully'}"

            # Append criteria status when the review provides it
            if hasattr(reviewResult, 'met_criteria') and reviewResult.met_criteria:
                for criterion in reviewResult.met_criteria:
                    completionMessage += f"\nβ’ {criterion}"

            if hasattr(reviewResult, 'quality_score'):
                completionMessage += f"\nπ Score {reviewResult.quality_score}/10"

            taskCompletionMessage = {
                "workflowId": workflow.id,
                "role": "assistant",
                "message": completionMessage,
                "status": "step",
                "sequenceNr": len(workflow.messages) + 1,
                "publishedAt": self.services.utils.getUtcTimestamp(),
                "documentsLabel": f"task_{taskIndex}_completion",
                "documents": [],
                "roundNumber": workflow.currentRound,
                "taskNumber": taskIndex,
                "actionNumber": 0,
                "taskProgress": "success"
            }

            message = self.services.interfaceDbChat.createMessage(taskCompletionMessage)
            if message:
                workflow.messages.append(message)
                logger.info(f"Task completion message created for task {taskIndex}")
        except Exception as e:
            logger.error(f"Error creating task completion message: {str(e)}")

    async def createRetryMessage(self, taskStep: TaskStep, workflow: ChatWorkflow, taskIndex: int, reviewResult: ReviewResult):
        """Create a retry message for the user with the review's improvements."""
        try:
            self._checkWorkflowStopped(workflow)

            retryMessage = {
                "workflowId": workflow.id,
                "role": "assistant",
                "message": f"π **Task {taskIndex}** needs retry: {reviewResult.improvements}",
                "status": "step",
                "sequenceNr": len(workflow.messages) + 1,
                "publishedAt": self.services.utils.getUtcTimestamp(),
                "documentsLabel": f"task_{taskIndex}_retry",
                "documents": [],
                "roundNumber": workflow.currentRound,
                "taskNumber": taskIndex,
                "actionNumber": 0,
                "taskProgress": "retry"
            }

            message = self.services.interfaceDbChat.createMessage(retryMessage)
            if message:
                workflow.messages.append(message)
                logger.info(f"Retry message created for task {taskIndex}")
        except Exception as e:
            logger.error(f"Error creating retry message: {str(e)}")

    async def createErrorMessage(self, taskStep: TaskStep, workflow: ChatWorkflow, taskIndex: int, errorDetails: str):
        """Create a user-facing error message for a failed task."""
        try:
            self._checkWorkflowStopped(workflow)

            errorMessage = f"**Task {taskIndex}**\n\nβ '{taskStep.objective}' failed\n\n"

            # Append specific error details when available
            if errorDetails:
                errorMessage += f"{errorDetails}\n\n"

            messageData = {
                "workflowId": workflow.id,
                "role": "assistant",
                "message": errorMessage,
                "status": "step",
                "sequenceNr": len(workflow.messages) + 1,
                "publishedAt": self.services.utils.getUtcTimestamp(),
                "actionId": None,
                "actionMethod": "task",
                "actionName": "task_error",
                "documentsLabel": None,
                "documents": [],
                "roundNumber": workflow.currentRound,
                "taskNumber": taskIndex,
                "actionNumber": 0,
                "taskProgress": "fail"
            }

            message = self.services.interfaceDbChat.createMessage(messageData)
            if message:
                workflow.messages.append(message)
                logger.info(f"Error message created for task {taskIndex}")
        except Exception as e:
            logger.error(f"Error creating error message: {str(e)}")

    def _extractRoundNumberFromLabel(self, label: str) -> int:
        """Extract the round number from a label like
        'round1_task1_action1_diagram_analysis'; 0 when absent/unparseable."""
        try:
            if not label or not isinstance(label, str):
                return 0

            # Label format: round{round}_task{task}_action{action}_{context}
            if label.startswith('round'):
                roundPart = label.split('_')[0]  # e.g. 'round1'
                if roundPart.startswith('round'):
                    roundNumber = roundPart[5:]  # strip the 'round' prefix
                    return int(roundNumber)

            return 0
        except Exception as e:
            logger.warning(f"Could not extract round number from label '{label}': {str(e)}")
            return 0

    def _extractTaskNumberFromLabel(self, label: str) -> int:
        """Extract the task number from a label like
        'round1_task1_action1_diagram_analysis'; 0 when absent/unparseable."""
        try:
            if not label or not isinstance(label, str):
                return 0

            # Label format: round{round}_task{task}_action{action}_{context}
            if '_task' in label:
                taskPart = label.split('_task')[1]
                # NOTE(review): requires a trailing '_' after the number,
                # so a label ending in '_task3' yields 0 - confirm intended
                if taskPart and '_' in taskPart:
                    taskNumber = taskPart.split('_')[0]
                    return int(taskNumber)

            return 0
        except Exception as e:
            logger.warning(f"Could not extract task number from label '{label}': {str(e)}")
            return 0

    def _extractActionNumberFromLabel(self, label: str) -> int:
        """Extract the action number from a label like
        'round1_task1_action1_diagram_analysis'; 0 when absent/unparseable."""
        try:
            if not label or not isinstance(label, str):
                return 0

            # Label format: round{round}_task{task}_action{action}_{context}
            if '_action' in label:
                actionPart = label.split('_action')[1]
                if actionPart and '_' in actionPart:
                    actionNumber = actionPart.split('_')[0]
                    return int(actionNumber)

            return 0
        except Exception as e:
            logger.warning(f"Could not extract action number from label '{label}': {str(e)}")
            return 0
diff --git a/modules/workflows/processing/core/taskPlanner.py b/modules/workflows/processing/core/taskPlanner.py
new file mode 100644
index 00000000..82b9cf9e
--- /dev/null
+++ b/modules/workflows/processing/core/taskPlanner.py
@@ -0,0 +1,311 @@
+# taskPlanner.py
+# Task planning functionality for workflows
+
+import json
+import logging
+from typing import Dict, Any
+from modules.datamodels.datamodelWorkflow import TaskStep, TaskContext, TaskPlan
+from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
+from modules.workflows.processing.shared.promptFactoryPlaceholders import (
+ createTaskPlanningPromptTemplate,
+ extractUserPrompt,
+ extractAvailableDocuments,
+ extractWorkflowHistory
+)
+
+logger = logging.getLogger(__name__)
+
class TaskPlanner:
    """Handles task planning for workflows.

    Generates an AI-backed high-level :class:`TaskPlan` from the user's
    request (``generateTaskPlan``) and validates the structure the AI
    returns (``_validateTaskPlan``). Depends on the injected service
    container for the AI client, chat database interface, user object
    and utility helpers.
    """

    def __init__(self, services):
        # Service container; provides .ai, .interfaceDbChat, .user, .utils.
        self.services = services
+
+ def _checkWorkflowStopped(self, workflow):
+ """Check if workflow has been stopped by user and raise exception if so"""
+ try:
+ # Get the current workflow status from the database to avoid stale data
+ current_workflow = self.services.interfaceDbChat.getWorkflow(workflow.id)
+ if current_workflow and current_workflow.status == "stopped":
+ logger.info("Workflow stopped by user, aborting task planning")
+ raise Exception("Workflow was stopped by user")
+ except Exception as e:
+ # If we can't get the current status due to other database issues, fall back to the in-memory object
+ logger.warning(f"Could not check current workflow status from database: {str(e)}")
+ if workflow and workflow.status == "stopped":
+ logger.info("Workflow stopped by user (from in-memory object), aborting task planning")
+ raise Exception("Workflow was stopped by user")
+
    async def generateTaskPlan(self, userInput: str, workflow) -> TaskPlan:
        """Generate a high-level task plan for the workflow.

        Builds a minimal planning TaskContext, renders the task-planning
        prompt template with placeholders, calls the AI service, extracts
        and parses the JSON plan from the response, validates it, resolves
        the user's language (AI-detected > service user > "en") and returns
        a TaskPlan.

        Args:
            userInput: The user's original request text.
            workflow: Workflow object; must expose ``id``. Its stored status
                is re-checked against the database before expensive steps.

        Returns:
            TaskPlan: overview, validated TaskStep list and user message.

        Raises:
            Exception: When the workflow was stopped by the user, the AI
                returns no response, the plan fails validation, or no valid
                tasks can be constructed.

        Side effects:
            Sets ``self.services.user.language`` to the resolved language
            for the rest of the workflow; writes trace-log entries in
            debug mode.
        """
        try:
            # Check workflow status before generating task plan
            self._checkWorkflowStopped(workflow)

            logger.info(f"=== STARTING TASK PLAN GENERATION ===")
            logger.info(f"Workflow ID: {workflow.id}")
            logger.info(f"User Input: {userInput}")

            # Check workflow status before calling AI service
            self._checkWorkflowStopped(workflow)

            # Create proper context object for task planning
            # For task planning, we need to create a minimal TaskStep since TaskContext requires it
            planningTaskStep = TaskStep(
                id="planning",
                objective=userInput,
                dependencies=[],
                success_criteria=[],
                estimated_complexity="medium"
            )

            taskPlanningContext = TaskContext(
                task_step=planningTaskStep,
                workflow=workflow,
                workflow_id=workflow.id,
                available_documents=None,
                available_connections=None,
                previous_results=[],
                previous_handover=None,
                improvements=[],
                retry_count=0,
                previous_action_results=[],
                previous_review_result=None,
                is_regeneration=False,
                failure_patterns=[],
                failed_actions=[],
                successful_actions=[],
                criteria_progress={
                    'met_criteria': set(),
                    'unmet_criteria': set(),
                    'attempt_history': []
                }
            )

            # Generate the task planning prompt with placeholders
            taskPlanningPromptTemplate = createTaskPlanningPromptTemplate()

            # Extract content for placeholders
            userPrompt = extractUserPrompt(taskPlanningContext)
            availableDocuments = extractAvailableDocuments(taskPlanningContext)
            workflowHistory = extractWorkflowHistory(self.services, taskPlanningContext)

            # Create placeholders dictionary
            placeholders = {
                "USER_PROMPT": userPrompt,
                "AVAILABLE_DOCUMENTS": availableDocuments,
                "WORKFLOW_HISTORY": workflowHistory
            }

            # Log task planning prompt sent to AI
            logger.info("=== TASK PLANNING PROMPT SENT TO AI ===")
            # Trace task planning prompt
            self._writeTraceLog("Task Plan Prompt", taskPlanningPromptTemplate)
            self._writeTraceLog("Task Plan Placeholders", placeholders)

            # Centralized AI call: Task planning (quality, detailed) with placeholders
            # NOTE(review): maxCost/maxProcessingTime are presumably USD and
            # seconds — confirm against AiCallOptions.
            options = AiCallOptions(
                operationType=OperationType.GENERATE_PLAN,
                priority=Priority.QUALITY,
                compressPrompt=False,
                compressContext=False,
                processingMode=ProcessingMode.DETAILED,
                maxCost=0.10,
                maxProcessingTime=30
            )

            # NOTE(review): despite the name, `prompt` holds the AI *response*
            # text from here on — consider renaming in a follow-up.
            prompt = await self.services.ai.callAi(
                prompt=taskPlanningPromptTemplate,
                placeholders=placeholders,
                options=options
            )

            # Check if AI response is valid
            if not prompt:
                raise ValueError("AI service returned no response for task planning")

            # Log task planning response received
            logger.info("=== TASK PLANNING AI RESPONSE RECEIVED ===")
            logger.info(f"Response length: {len(prompt) if prompt else 0}")
            # Trace task planning response
            self._writeTraceLog("Task Plan Response", prompt)

            # Parse task plan response: take the outermost {...} span so any
            # surrounding chatter from the model is ignored.
            try:
                jsonStart = prompt.find('{')
                jsonEnd = prompt.rfind('}') + 1
                if jsonStart == -1 or jsonEnd == 0:
                    raise ValueError("No JSON found in response")
                jsonStr = prompt[jsonStart:jsonEnd]
                taskPlanDict = json.loads(jsonStr)

                if 'tasks' not in taskPlanDict:
                    raise ValueError("Task plan missing 'tasks' field")
            except Exception as e:
                # Parse failure degrades to an empty plan; the emptiness check
                # below converts that into a hard error.
                logger.error(f"Error parsing task plan response: {str(e)}")
                taskPlanDict = {'tasks': []}

            if not self._validateTaskPlan(taskPlanDict):
                logger.error("Generated task plan failed validation")
                logger.error(f"AI Response: {prompt}")
                logger.error(f"Parsed Task Plan: {json.dumps(taskPlanDict, indent=2)}")
                raise Exception("AI-generated task plan failed validation - AI is required for task planning")

            if not taskPlanDict.get('tasks'):
                raise ValueError("Task plan contains no tasks")

            # LANGUAGE DETECTION: Determine user language once for the entire workflow
            # Priority: 1. languageUserDetected from AI response, 2. service.user.language, 3. "en"
            detectedLanguage = taskPlanDict.get('languageUserDetected', '').strip()
            serviceUserLanguage = getattr(self.services.user, 'language', '') if self.services and self.services.user else ''

            if detectedLanguage and len(detectedLanguage) == 2:  # Valid language code like "en", "de", "fr"
                userLanguage = detectedLanguage
                logger.info(f"Using detected language from AI response: {userLanguage}")
            elif serviceUserLanguage and len(serviceUserLanguage) == 2:
                userLanguage = serviceUserLanguage
                logger.info(f"Using language from service user object: {userLanguage}")
            else:
                userLanguage = "en"
                logger.info(f"Using default language: {userLanguage}")

            # Set the detected language in the service for use throughout the workflow
            if self.services and self.services.user:
                self.services.user.language = userLanguage
                logger.info(f"Set workflow user language to: {userLanguage}")

            tasks = []
            for i, taskDict in enumerate(taskPlanDict.get('tasks', [])):
                if not isinstance(taskDict, dict):
                    logger.warning(f"Skipping invalid task {i+1}: not a dictionary")
                    continue

                # Map old 'description' field to new 'objective' field
                if 'description' in taskDict and 'objective' not in taskDict:
                    taskDict['objective'] = taskDict.pop('description')

                try:
                    task = TaskStep(**taskDict)
                    tasks.append(task)
                except Exception as e:
                    # Individual bad tasks are dropped; total emptiness is
                    # handled as an error right below.
                    logger.warning(f"Skipping invalid task {i+1}: {str(e)}")
                    continue

            if not tasks:
                raise ValueError("No valid tasks could be created from AI response")

            taskPlan = TaskPlan(
                overview=taskPlanDict.get('overview', ''),
                tasks=tasks,
                userMessage=taskPlanDict.get('userMessage', '')
            )

            logger.info(f"Task plan generated successfully with {len(tasks)} tasks")
            logger.info(f"Workflow user language set to: {userLanguage}")

            return taskPlan
        except Exception as e:
            logger.error(f"Error in generateTaskPlan: {str(e)}")
            raise
+
+ def _validateTaskPlan(self, taskPlan: Dict[str, Any]) -> bool:
+ """Validate task plan structure"""
+ try:
+ if not isinstance(taskPlan, dict):
+ logger.error("Task plan is not a dictionary")
+ return False
+
+ if 'tasks' not in taskPlan or not isinstance(taskPlan['tasks'], list):
+ logger.error(f"Task plan missing 'tasks' field or not a list. Found: {type(taskPlan.get('tasks', 'MISSING'))}")
+ return False
+
+ # First pass: collect all task IDs to validate dependencies
+ taskIds = set()
+ for task in taskPlan['tasks']:
+ if not isinstance(task, dict):
+ logger.error(f"Task is not a dictionary: {type(task)}")
+ return False
+ if 'id' not in task:
+ logger.error(f"Task missing 'id' field: {task}")
+ return False
+ taskIds.add(task['id'])
+
+ # Second pass: validate each task
+ for i, task in enumerate(taskPlan['tasks']):
+ if not isinstance(task, dict):
+ logger.error(f"Task {i} is not a dictionary: {type(task)}")
+ return False
+
+ requiredFields = ['id', 'objective', 'success_criteria']
+ missingFields = [field for field in requiredFields if field not in task]
+ if missingFields:
+ logger.error(f"Task {i} missing required fields: {missingFields}")
+ return False
+
+ # Check for duplicate IDs (shouldn't happen after first pass, but safety check)
+ if task['id'] in taskIds and list(taskPlan['tasks']).count(task['id']) > 1:
+ logger.error(f"Task {i} has duplicate ID: {task['id']}")
+ return False
+
+ dependencies = task.get('dependencies', [])
+ if not isinstance(dependencies, list):
+ logger.error(f"Task {i} dependencies is not a list: {type(dependencies)}")
+ return False
+
+ for dep in dependencies:
+ if dep not in taskIds and dep != 'task_0':
+ logger.error(f"Task {i} has invalid dependency: {dep} (available: {list(taskIds) + ['task_0']})")
+ return False
+
+ logger.info(f"Task plan validation successful with {len(taskIds)} tasks")
+ return True
+
+ except Exception as e:
+ logger.error(f"Error validating task plan: {str(e)}")
+ return False
+
    def _writeTraceLog(self, contextText: str, data: Any) -> None:
        """Write trace data to configured trace file if in debug mode.

        Appends a timestamped entry (plus the full payload, when given) to
        ``log_trace.log`` inside the configured log directory. A no-op when
        the module logger is above DEBUG; best-effort — all errors are
        swallowed so tracing can never break the workflow.

        Args:
            contextText: Short label describing what is being traced.
            data: Optional payload; dicts/lists are JSON-pretty-printed,
                anything else is stringified without truncation.
        """
        try:
            import os
            from datetime import datetime, UTC

            # Only write if logger is in debug mode
            if logger.level > logging.DEBUG:
                return

            # Get log directory from configuration
            logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
            if not os.path.isabs(logDir):
                # If relative path, make it relative to the gateway directory
                # (four directory levels above this module file).
                gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
                logDir = os.path.join(gatewayDir, logDir)

            # Ensure log directory exists
            os.makedirs(logDir, exist_ok=True)

            # Create trace file path
            traceFile = os.path.join(logDir, "log_trace.log")

            # Format the trace entry; millisecond precision ([:-3] trims µs).
            timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
            traceEntry = f"[{timestamp}] {contextText}\n"

            # Add data if provided - show full content without truncation
            if data is not None:
                if isinstance(data, (dict, list)):
                    # Use ensure_ascii=False to preserve Unicode characters and indent=2 for readability
                    traceEntry += f"Data: {json.dumps(data, indent=2, default=str, ensure_ascii=False)}\n"
                else:
                    # For string data, show full content without truncation
                    traceEntry += f"Data: {str(data)}\n"

            traceEntry += "-" * 80 + "\n\n"

            # Write to trace file
            with open(traceFile, "a", encoding="utf-8") as f:
                f.write(traceEntry)

        except Exception as e:
            # Don't log trace errors to avoid recursion
            # (deliberate best-effort swallow: tracing must never raise).
            pass
diff --git a/modules/workflows/processing/core/validator.py b/modules/workflows/processing/core/validator.py
new file mode 100644
index 00000000..ef093056
--- /dev/null
+++ b/modules/workflows/processing/core/validator.py
@@ -0,0 +1,104 @@
# validator.py
# Validation logic for workflows

import logging
from typing import Dict, Any, List

logger = logging.getLogger(__name__)

class WorkflowValidator:
    """Handles validation of workflow components (task plans and actions)."""

    def __init__(self, services):
        # Service container kept for parity with the other workflow
        # components; the validation routines below do not use it directly.
        self.services = services

    def validateTask(self, taskPlan: Dict[str, Any]) -> bool:
        """Validate task plan structure.

        Checks that the plan is a dict with a ``tasks`` list, that every
        task is a dict with a unique ``id`` and the required fields, and
        that every dependency references an existing task ID (``task_0``
        is the implicit root and always allowed).

        Args:
            taskPlan: Parsed task plan dictionary.

        Returns:
            True when structurally valid; False otherwise (problem logged).
        """
        try:
            if not isinstance(taskPlan, dict):
                logger.error("Task plan is not a dictionary")
                return False

            if 'tasks' not in taskPlan or not isinstance(taskPlan['tasks'], list):
                logger.error(f"Task plan missing 'tasks' field or not a list. Found: {type(taskPlan.get('tasks', 'MISSING'))}")
                return False

            # First pass: collect task IDs and reject duplicates on sight.
            # BUGFIX: the old check counted the ID string against the list of
            # task *dicts* (`list(tasks).count(task['id'])`), which is always
            # 0, so duplicate IDs were never detected.
            taskIds = set()
            for i, task in enumerate(taskPlan['tasks']):
                if not isinstance(task, dict):
                    logger.error(f"Task is not a dictionary: {type(task)}")
                    return False
                if 'id' not in task:
                    logger.error(f"Task missing 'id' field: {task}")
                    return False
                if task['id'] in taskIds:
                    logger.error(f"Task {i} has duplicate ID: {task['id']}")
                    return False
                taskIds.add(task['id'])

            # Second pass: required fields and dependency references.
            for i, task in enumerate(taskPlan['tasks']):
                requiredFields = ['id', 'objective', 'success_criteria']
                missingFields = [field for field in requiredFields if field not in task]
                if missingFields:
                    logger.error(f"Task {i} missing required fields: {missingFields}")
                    return False

                dependencies = task.get('dependencies', [])
                if not isinstance(dependencies, list):
                    logger.error(f"Task {i} dependencies is not a list: {type(dependencies)}")
                    return False

                for dep in dependencies:
                    if dep not in taskIds and dep != 'task_0':
                        logger.error(f"Task {i} has invalid dependency: {dep} (available: {list(taskIds) + ['task_0']})")
                        return False

            logger.info(f"Task plan validation successful with {len(taskIds)} tasks")
            return True

        except Exception as e:
            logger.error(f"Error validating task plan: {str(e)}")
            return False

    def validateAction(self, actions: List[Dict[str, Any]], context) -> bool:
        """Validate action structure.

        Each action must be a dict with non-empty ``method``, ``action``,
        ``parameters`` and ``resultLabel`` fields; ``parameters`` must be a
        dict and ``resultLabel`` must start with 'round'.

        NOTE(review): the truthiness check below also rejects an *empty*
        ``parameters`` dict — confirm whether parameterless actions are
        ever legitimate before relaxing it.

        Args:
            actions: List of action dictionaries to validate.
            context: Task context (currently unused; kept for interface
                compatibility with callers).

        Returns:
            True when all actions are valid; False otherwise (logged).
        """
        try:
            if not isinstance(actions, list):
                logger.error("Actions must be a list")
                return False
            if len(actions) == 0:
                logger.warning("No actions generated")
                return False
            for i, action in enumerate(actions):
                if not isinstance(action, dict):
                    logger.error(f"Action {i} must be a dictionary")
                    return False
                requiredFields = ['method', 'action', 'parameters', 'resultLabel']
                missingFields = []
                for field in requiredFields:
                    if field not in action or not action[field]:
                        missingFields.append(field)
                if missingFields:
                    logger.error(f"Action {i} missing required fields: {missingFields}")
                    return False
                resultLabel = action.get('resultLabel', '')
                if not resultLabel.startswith('round'):
                    logger.error(f"Action {i} result label must start with 'round': {resultLabel}")
                    return False
                parameters = action.get('parameters', {})
                if not isinstance(parameters, dict):
                    logger.error(f"Action {i} parameters must be a dictionary")
                    return False
            logger.info(f"Successfully validated {len(actions)} actions")
            return True
        except Exception as e:
            logger.error(f"Error validating actions: {str(e)}")
            return False
diff --git a/modules/workflows/processing/handlingTasks.py b/modules/workflows/processing/handlingTasks.py
deleted file mode 100644
index 7861608d..00000000
--- a/modules/workflows/processing/handlingTasks.py
+++ /dev/null
@@ -1,2191 +0,0 @@
-# handlingTasks.py
-# Refactored for clarity and consolidation
-
-import uuid
-import asyncio
-import logging
-import json
-import time
-from typing import Dict, Any, Optional, List, Union
-from datetime import datetime, UTC
-from modules.datamodels.datamodelWorkflow import (
- TaskStep,
- TaskContext,
- ReviewResult,
- TaskPlan,
- TaskResult,
- ReviewContext,
- TaskStatus,
- ActionResult,
- TaskAction
-)
-from modules.datamodels.datamodelChat import (
- WorkflowResult,
- ChatWorkflow,
- ChatMessage,
- ChatDocument
-)
-from modules.workflows.processing.executionState import TaskExecutionState
-from modules.workflows.processing.promptFactory import (
- createTaskPlanningPrompt,
- createActionDefinitionPrompt,
- createResultReviewPrompt,
- createActionSelectionPrompt,
- createActionParameterPrompt,
- createRefinementPrompt
-)
-from modules.workflows.processing.promptFactoryPlaceholders import (
- createTaskPlanningPromptTemplate,
- createActionDefinitionPromptTemplate,
- createActionSelectionPromptTemplate,
- createActionParameterPromptTemplate,
- createRefinementPromptTemplate,
- createResultReviewPromptTemplate,
- extractUserPrompt,
- extractAvailableDocuments,
- extractWorkflowHistory,
- extractAvailableMethods,
- extractUserLanguage,
- extractReviewContent
-)
-from modules.workflows.processing.promptFactory import methods, getEnhancedDocumentContext
-from modules.workflows.processing.executionState import should_continue
-from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
-
-logger = logging.getLogger(__name__)
-
-class WorkflowStoppedException(Exception):
- """Exception raised when a workflow is stopped by the user."""
- pass
-
-class HandlingTasks:
- def __init__(self, services, workflow=None):
- self.services = services
- self.workflow = workflow
-
- def _checkWorkflowStopped(self):
- """
- Check if workflow has been stopped by user and raise exception if so.
- This function centralizes all workflow stop checking logic to avoid code duplication.
- """
- try:
- # Get the current workflow status from the database to avoid stale data
- current_workflow = self.services.interfaceDbChat.getWorkflow(self.workflow.id)
- if current_workflow and current_workflow.status == "stopped":
- logger.info("Workflow stopped by user, aborting execution")
- raise WorkflowStoppedException("Workflow was stopped by user")
- except WorkflowStoppedException:
- # Re-raise the WorkflowStoppedException immediately
- raise
- except Exception as e:
- # If we can't get the current status due to other database issues, fall back to the in-memory object
- logger.warning(f"Could not check current workflow status from database: {str(e)}")
- if self.workflow and self.workflow.status == "stopped":
- logger.info("Workflow stopped by user (from in-memory object), aborting execution")
- raise WorkflowStoppedException("Workflow was stopped by user")
-
- async def generateTaskPlan(self, userInput: str, workflow) -> TaskPlan:
- """Generate a high-level task plan for the workflow."""
- try:
- # Check workflow status before generating task plan
- self._checkWorkflowStopped()
-
- logger.info(f"=== STARTING TASK PLAN GENERATION ===")
- logger.info(f"Workflow ID: {workflow.id}")
- logger.info(f"User Input: {userInput}")
-
- # Check workflow status before calling AI service
- self._checkWorkflowStopped()
-
- # Create proper context object for task planning
- # For task planning, we need to create a minimal TaskStep since TaskContext requires it
- planning_task_step = TaskStep(
- id="planning",
- objective=userInput,
- dependencies=[],
- success_criteria=[],
- estimated_complexity="medium"
- )
-
- task_planning_context = TaskContext(
- task_step=planning_task_step,
- workflow=workflow,
- workflow_id=workflow.id,
- available_documents=None,
- available_connections=None,
- previous_results=[],
- previous_handover=None,
- improvements=[],
- retry_count=0,
- previous_action_results=[],
- previous_review_result=None,
- is_regeneration=False,
- failure_patterns=[],
- failed_actions=[],
- successful_actions=[],
- criteria_progress={
- 'met_criteria': set(),
- 'unmet_criteria': set(),
- 'attempt_history': []
- }
- )
-
- # Generate the task planning prompt with placeholders
- task_planning_prompt_template = createTaskPlanningPromptTemplate()
-
- # Extract content for placeholders
- user_prompt = extractUserPrompt(task_planning_context)
- available_documents = extractAvailableDocuments(task_planning_context)
- workflow_history = extractWorkflowHistory(self.services, task_planning_context)
-
- # Create placeholders dictionary
- placeholders = {
- "USER_PROMPT": user_prompt,
- "AVAILABLE_DOCUMENTS": available_documents,
- "WORKFLOW_HISTORY": workflow_history
- }
-
- # Log task planning prompt sent to AI
- logger.info("=== TASK PLANNING PROMPT SENT TO AI ===")
- # Trace task planning prompt
- self.writeTraceLog("Task Plan Prompt", task_planning_prompt_template)
- self.writeTraceLog("Task Plan Placeholders", placeholders)
-
- # Centralized AI call: Task planning (quality, detailed) with placeholders
- options = AiCallOptions(
- operationType=OperationType.GENERATE_PLAN,
- priority=Priority.QUALITY,
- compressPrompt=False,
- compressContext=False,
- processingMode=ProcessingMode.DETAILED,
- maxCost=0.10,
- maxProcessingTime=30
- )
-
- prompt = await self.services.ai.callAi(
- prompt=task_planning_prompt_template,
- placeholders=placeholders,
- options=options
- )
-
- # Check if AI response is valid
- if not prompt:
- raise ValueError("AI service returned no response for task planning")
-
- # Log task planning response received
- logger.info("=== TASK PLANNING AI RESPONSE RECEIVED ===")
- logger.info(f"Response length: {len(prompt) if prompt else 0}")
- # Trace task planning response
- self.writeTraceLog("Task Plan Response", prompt)
-
- # Inline _parseTaskPlanResponse logic
- try:
- json_start = prompt.find('{')
- json_end = prompt.rfind('}') + 1
- if json_start == -1 or json_end == 0:
- raise ValueError("No JSON found in response")
- json_str = prompt[json_start:json_end]
- task_plan_dict = json.loads(json_str)
-
- if 'tasks' not in task_plan_dict:
- raise ValueError("Task plan missing 'tasks' field")
- except Exception as e:
- logger.error(f"Error parsing task plan response: {str(e)}")
- task_plan_dict = {'tasks': []}
-
- if not self._validateTaskPlan(task_plan_dict):
- logger.error("Generated task plan failed validation")
- logger.error(f"AI Response: {prompt}")
- logger.error(f"Parsed Task Plan: {json.dumps(task_plan_dict, indent=2)}")
- raise Exception("AI-generated task plan failed validation - AI is required for task planning")
-
- if not task_plan_dict.get('tasks'):
- raise ValueError("Task plan contains no tasks")
-
- # LANGUAGE DETECTION: Determine user language once for the entire workflow
- # Priority: 1. languageUserDetected from AI response, 2. service.user.language, 3. "en"
- detected_language = task_plan_dict.get('languageUserDetected', '').strip()
- service_user_language = getattr(self.services.user, 'language', '') if self.services and self.services.user else ''
-
- if detected_language and len(detected_language) == 2: # Valid language code like "en", "de", "fr"
- user_language = detected_language
- logger.info(f"Using detected language from AI response: {user_language}")
- elif service_user_language and len(service_user_language) == 2:
- user_language = service_user_language
- logger.info(f"Using language from service user object: {user_language}")
- else:
- user_language = "en"
- logger.info(f"Using default language: {user_language}")
-
- # Set the detected language in the service for use throughout the workflow
- if self.services and self.services.user:
- self.services.user.language = user_language
- logger.info(f"Set workflow user language to: {user_language}")
-
- tasks = []
- for i, task_dict in enumerate(task_plan_dict.get('tasks', [])):
- if not isinstance(task_dict, dict):
- logger.warning(f"Skipping invalid task {i+1}: not a dictionary")
- continue
-
- # Map old 'description' field to new 'objective' field
- if 'description' in task_dict and 'objective' not in task_dict:
- task_dict['objective'] = task_dict.pop('description')
-
- try:
- task = TaskStep(**task_dict)
- tasks.append(task)
- except Exception as e:
- logger.warning(f"Skipping invalid task {i+1}: {str(e)}")
- continue
-
- if not tasks:
- raise ValueError("No valid tasks could be created from AI response")
-
- task_plan = TaskPlan(
- overview=task_plan_dict.get('overview', ''),
- tasks=tasks,
- userMessage=task_plan_dict.get('userMessage', '')
- )
-
- # Set workflow totals for progress tracking
- total_tasks = len(tasks)
- if total_tasks == 0:
- raise ValueError("Task plan contains no valid tasks")
-
- self.setWorkflowTotals(total_tasks=total_tasks)
-
- logger.info(f"Task plan generated successfully with {len(tasks)} tasks")
- logger.info(f"Workflow user language set to: {user_language}")
-
- # PHASE 3: Create chat message containing the task plan
- await self.createTaskPlanMessage(task_plan, workflow)
-
- return task_plan
- except Exception as e:
- logger.error(f"Error in generateTaskPlan: {str(e)}")
- raise
-
- async def createTaskPlanMessage(self, task_plan: TaskPlan, workflow):
- """Create a chat message containing the task plan with user-friendly messages"""
- try:
- # Build task plan summary
- task_summary = f"π **Task Plan**\n\n"
-
- # Get overall user message from task plan if available
- overall_message = task_plan.userMessage
- if overall_message:
- task_summary += f"{overall_message}\n\n"
-
- # Add each task with its user message
- for i, task in enumerate(task_plan.tasks):
- if task.userMessage:
- task_summary += f"π¬ {task.userMessage}\n"
- task_summary += "\n"
-
-
- # Create workflow message
- message_data = {
- "workflowId": workflow.id,
- "role": "assistant",
- "message": task_summary,
- "status": "step",
- "sequenceNr": len(workflow.messages) + 1,
- "publishedAt": self.services.utils.getUtcTimestamp(),
- "documentsLabel": "task_plan",
- "documents": [],
- # Add workflow context fields - use current workflow round instead of hardcoded 1
- "roundNumber": workflow.currentRound, # Use current workflow round
- "taskNumber": 1, # Task plan is before individual tasks; to keep 1, that UI not filtering the message
- "actionNumber": 0,
- # Add task progress status
- "taskProgress": "pending"
- }
-
- message = self.services.interfaceDbChat.createMessage(message_data)
- if message:
- workflow.messages.append(message)
-
- async def createReactActionMessage(self, workflow, selection, step, max_steps, task_index, message_type, result=None, observation=None):
- """Create user-friendly messages for React workflow actions"""
- try:
- action = selection.get('action', {})
- method = action.get('method', '')
- action_name = action.get('name', '')
-
- # Get user language
- user_language = self.services.user.language if self.services and self.services.user else 'en'
-
- if message_type == "before":
- # Message BEFORE action execution
- user_message = await self.generateActionIntentionMessage(method, action_name, user_language)
- message_content = f"π **Step {step}/{max_steps}**\n\n{user_message}"
- status = "step"
- action_progress = "pending"
- documents_label = f"action_{step}_intention"
-
- elif message_type == "after":
- # Message AFTER action execution
- user_message = await self.generateActionResultMessage(method, action_name, result, observation, user_language)
- success_icon = "β
" if result and result.success else "β"
- message_content = f"{success_icon} **Step {step}/{max_steps} Complete**\n\n{user_message}"
- status = "step"
- action_progress = "success" if result and result.success else "fail"
- documents_label = observation.get('resultLabel') if observation else f"action_{step}_result"
- else:
- return
-
- # Create workflow message
- message_data = {
- "workflowId": workflow.id,
- "role": "assistant",
- "message": message_content,
- "status": status,
- "sequenceNr": len(workflow.messages) + 1,
- "publishedAt": self.services.utils.getUtcTimestamp(),
- "documentsLabel": documents_label,
- "documents": [],
- "roundNumber": workflow.currentRound,
- "taskNumber": task_index,
- "actionNumber": step,
- "actionProgress": action_progress
- }
-
- message = self.services.interfaceDbChat.createMessage(message_data)
- if message:
- workflow.messages.append(message)
-
- except Exception as e:
- logger.error(f"Error creating React action message: {str(e)}")
-
- async def generateActionIntentionMessage(self, method, action_name, user_language):
- """Generate user-friendly message explaining what action will do"""
- try:
- # Create a simple AI prompt to generate user-friendly action descriptions
- prompt = f"""Generate a brief, user-friendly message explaining what the {method}.{action_name} action will do.
-
-User language: {user_language}
-
-Examples:
-- For ai.process: "I'll analyze the content and provide insights"
-- For document.extract: "I'll extract the key information from the documents"
-- For document.generate: "I'll create a formatted report from the documents"
-- For outlook.composeEmail: "I'll compose an email based on your requirements"
-- For outlook.sendEmail: "I'll send the composed email"
-- For sharepoint.findDocumentPath: "I'll search for the requested documents"
-- For sharepoint.readDocuments: "I'll read the document contents"
-
-Return only the user-friendly message, no technical details."""
-
- # Call AI to generate user-friendly message
- response = await self.services.ai.callAi(
- prompt=prompt,
- options=AiCallOptions(
- operationType=OperationType.GENERATE_CONTENT,
- priority=Priority.SPEED,
- compressPrompt=True,
- maxCost=0.01,
- maxProcessingTime=5
- )
- )
-
- return response.strip() if response else f"Executing {method}.{action_name} action..."
-
- except Exception as e:
- logger.error(f"Error generating action intention message: {str(e)}")
- return f"Executing {method}.{action_name} action..."
-
- async def generateActionResultMessage(self, method, action_name, result, observation, user_language):
- """Generate user-friendly message explaining action results"""
- try:
- # Build result context
- result_context = ""
- if result and result.documents:
- doc_count = len(result.documents)
- result_context = f"Generated {doc_count} document(s)"
- elif observation and observation.get('documentsCount', 0) > 0:
- doc_count = observation.get('documentsCount', 0)
- result_context = f"Generated {doc_count} document(s)"
-
- # Create AI prompt for result message
- prompt = f"""Generate a brief, user-friendly message explaining the result of the {method}.{action_name} action.
-
-User language: {user_language}
-Success: {result.success if result else 'Unknown'}
-Result context: {result_context}
-
-Examples:
-- For successful ai.process: "Analysis complete! I've processed the content and generated insights."
-- For successful document.extract: "Extraction complete! I've extracted the key information from the documents."
-- For successful document.generate: "Report generated! I've created a formatted document with the requested content."
-- For successful outlook.composeEmail: "Email composed! I've prepared the email content for sending."
-- For successful outlook.sendEmail: "Email sent! The message has been delivered successfully."
-- For failed actions: "The action encountered an issue. Please check the details."
-
-Return only the user-friendly message, no technical details."""
-
- # Call AI to generate user-friendly result message
- response = await self.services.ai.callAi(
- prompt=prompt,
- options=AiCallOptions(
- operationType=OperationType.GENERATE_CONTENT,
- priority=Priority.SPEED,
- compressPrompt=True,
- maxCost=0.01,
- maxProcessingTime=5
- )
- )
-
- return response.strip() if response else f"{method}.{action_name} action completed"
-
- except Exception as e:
- logger.error(f"Error generating action result message: {str(e)}")
- return f"{method}.{action_name} action completed"
-
- async def generateTaskActions(self, task_step, workflow, previous_results=None, enhanced_context=None) -> List[TaskAction]:
- """Generate actions for a given task step."""
- try:
- # Check workflow status before generating actions
- self._checkWorkflowStopped()
-
- retry_info = f" (Retry #{enhanced_context.retry_count})" if enhanced_context and enhanced_context.retry_count > 0 else ""
- logger.info(f"Generating actions for task: {task_step.objective}{retry_info}")
-
- # Log criteria progress if this is a retry
- if enhanced_context and hasattr(enhanced_context, 'criteria_progress') and enhanced_context.criteria_progress is not None:
- progress = enhanced_context.criteria_progress
- logger.info(f"Retry attempt {enhanced_context.retry_count} - Criteria progress:")
- if progress.get('met_criteria'):
- logger.info(f" Met criteria: {', '.join(progress['met_criteria'])}")
- if progress.get('unmet_criteria'):
- logger.warning(f" Unmet criteria: {', '.join(progress['unmet_criteria'])}")
-
- # Show improvement trends
- if progress.get('attempt_history'):
- recent_attempts = progress['attempt_history'][-2:] # Last 2 attempts
- if len(recent_attempts) >= 2:
- prev_score = recent_attempts[0].get('quality_score', 0)
- curr_score = recent_attempts[1].get('quality_score', 0)
- if curr_score > prev_score:
- logger.info(f" Quality improving: {prev_score} -> {curr_score}")
- elif curr_score < prev_score:
- logger.warning(f" Quality declining: {prev_score} -> {curr_score}")
- else:
- logger.info(f" Quality stable: {curr_score}")
-
- # Enhanced retry context logging
- if enhanced_context and enhanced_context.retry_count > 0:
- logger.info("=== RETRY CONTEXT FOR ACTION GENERATION ===")
- logger.info(f"Retry Count: {enhanced_context.retry_count}")
- logger.debug(f"Previous Improvements: {enhanced_context.improvements}")
- logger.debug(f"Previous Review Result: {enhanced_context.previous_review_result}")
- logger.debug(f"Failure Patterns: {enhanced_context.failure_patterns}")
- logger.debug(f"Failed Actions: {enhanced_context.failed_actions}")
- logger.debug(f"Successful Actions: {enhanced_context.successful_actions}")
- logger.info("=== END RETRY CONTEXT ===")
-
- # Log that we're starting action generation
- logger.info("=== STARTING ACTION GENERATION ===")
-
- # Create proper context object for action definition
- if enhanced_context and isinstance(enhanced_context, TaskContext):
- # Use existing TaskContext if provided
- action_context = TaskContext(
- task_step=enhanced_context.task_step,
- workflow=enhanced_context.workflow,
- workflow_id=enhanced_context.workflow_id,
- available_documents=enhanced_context.available_documents,
- available_connections=enhanced_context.available_connections,
- previous_results=enhanced_context.previous_results or previous_results or [],
- previous_handover=enhanced_context.previous_handover,
- improvements=enhanced_context.improvements or [],
- retry_count=enhanced_context.retry_count or 0,
- previous_action_results=enhanced_context.previous_action_results or [],
- previous_review_result=enhanced_context.previous_review_result,
- is_regeneration=enhanced_context.is_regeneration or False,
- failure_patterns=enhanced_context.failure_patterns or [],
- failed_actions=enhanced_context.failed_actions or [],
- successful_actions=enhanced_context.successful_actions or [],
- criteria_progress=enhanced_context.criteria_progress
- )
- else:
- # Create new context from scratch
- action_context = TaskContext(
- task_step=task_step,
- workflow=workflow,
- workflow_id=workflow.id,
- available_documents=None,
- available_connections=None,
- previous_results=previous_results or [],
- previous_handover=None,
- improvements=[],
- retry_count=0,
- previous_action_results=[],
- previous_review_result=None,
- is_regeneration=False,
- failure_patterns=[],
- failed_actions=[],
- successful_actions=[],
- criteria_progress=None
- )
-
- # Check workflow status before calling AI service
- self._checkWorkflowStopped()
-
- # Generate the action definition prompt with placeholders
- action_prompt_template = createActionDefinitionPromptTemplate()
-
- # Extract content for placeholders
- user_prompt = extractUserPrompt(action_context)
- available_documents = extractAvailableDocuments(action_context)
- workflow_history = extractWorkflowHistory(self.services, action_context)
- available_methods = extractAvailableMethods(self.services)
- user_language = extractUserLanguage(self.services)
-
- # Create placeholders dictionary
- placeholders = {
- "USER_PROMPT": user_prompt,
- "AVAILABLE_DOCUMENTS": available_documents,
- "WORKFLOW_HISTORY": workflow_history,
- "AVAILABLE_METHODS": available_methods,
- "USER_LANGUAGE": user_language
- }
-
- # Trace action planning prompt
- self.writeTraceLog("Action Plan Prompt", action_prompt_template)
- self.writeTraceLog("Action Plan Placeholders", placeholders)
-
- # Centralized AI call: Action planning (quality, detailed) with placeholders
- options = AiCallOptions(
- operationType=OperationType.GENERATE_PLAN,
- priority=Priority.QUALITY,
- compressPrompt=False,
- compressContext=False,
- processingMode=ProcessingMode.DETAILED,
- maxCost=0.10,
- maxProcessingTime=30
- )
-
- prompt = await self.services.ai.callAi(
- prompt=action_prompt_template,
- placeholders=placeholders,
- options=options
- )
-
- # Check if AI response is valid
- if not prompt:
- raise ValueError("AI service returned no response")
-
- # Log action response received
- logger.info("=== ACTION PLAN AI RESPONSE RECEIVED ===")
- logger.info(f"Response length: {len(prompt) if prompt else 0}")
- # Trace action planning response
- self.writeTraceLog("Action Plan Response", prompt)
-
- # Inline parseActionResponse logic here
- json_start = prompt.find('{')
- json_end = prompt.rfind('}') + 1
- if json_start == -1 or json_end == 0:
- raise ValueError("No JSON found in response")
- json_str = prompt[json_start:json_end]
-
- try:
- action_data = json.loads(json_str)
- except Exception as e:
- logger.error(f"Error parsing action response JSON: {str(e)}")
- action_data = {}
-
- if 'actions' not in action_data:
- raise ValueError("Action response missing 'actions' field")
-
- actions = action_data['actions']
- if not actions:
- raise ValueError("Action response contains empty actions list")
-
- if not isinstance(actions, list):
- raise ValueError(f"Action response 'actions' field is not a list: {type(actions)}")
-
- if not self._validateActions(actions, action_context):
- logger.error("Generated actions failed validation")
- raise Exception("AI-generated actions failed validation - AI is required for action generation")
-
- # Convert to TaskAction objects
- task_actions = []
- for i, a in enumerate(actions):
- if not isinstance(a, dict):
- logger.warning(f"Skipping invalid action {i+1}: not a dictionary")
- continue
-
- task_action = self.createTaskAction({
- "execMethod": a.get('method', 'unknown'),
- "execAction": a.get('action', 'unknown'),
- "execParameters": a.get('parameters', {}),
- "execResultLabel": a.get('resultLabel', ''),
- "expectedDocumentFormats": a.get('expectedDocumentFormats', None),
- "status": TaskStatus.PENDING,
- # Extract user-friendly message if available
- "userMessage": a.get('userMessage', None)
- })
-
- if task_action:
- task_actions.append(task_action)
- else:
- logger.warning(f"Skipping invalid action {i+1}: failed to create TaskAction")
-
- valid_actions = [ta for ta in task_actions if ta]
-
- if not valid_actions:
- raise ValueError("No valid actions could be created from AI response")
-
- return valid_actions
- except Exception as e:
- logger.error(f"Error in generateTaskActions: {str(e)}")
- return []
-
- # ===== React-mode iterative functions =====
-
- async def plan_select(self, context: TaskContext) -> Dict[str, Any]:
- """Plan: select exactly one action. Returns {"action": {method, name}}"""
- prompt_template = createActionSelectionPromptTemplate()
-
- # Extract content for placeholders
- user_prompt = extractUserPrompt(context)
- available_documents = extractAvailableDocuments(context)
- user_language = extractUserLanguage(self.services)
- available_methods = extractAvailableMethods(self.services)
-
- # Create placeholders dictionary
- placeholders = {
- "USER_PROMPT": user_prompt,
- "AVAILABLE_DOCUMENTS": available_documents,
- "USER_LANGUAGE": user_language,
- "AVAILABLE_METHODS": available_methods
- }
-
- self.writeTraceLog("React Plan Selection Prompt", prompt_template)
- self.writeTraceLog("React Plan Selection Placeholders", placeholders)
-
- # Centralized AI call for plan selection (use plan generation quality)
- options = AiCallOptions(
- operationType=OperationType.GENERATE_PLAN,
- priority=Priority.QUALITY,
- compressPrompt=False,
- compressContext=False,
- processingMode=ProcessingMode.DETAILED,
- maxCost=0.10,
- maxProcessingTime=30
- )
-
- response = await self.services.ai.callAi(
- prompt=prompt_template,
- placeholders=placeholders,
- options=options
- )
- self.writeTraceLog("React Plan Selection Response", response)
- json_start = response.find('{') if response else -1
- json_end = response.rfind('}') + 1 if response else 0
- if json_start == -1 or json_end == 0:
- raise ValueError("No JSON in selection response")
- selection = json.loads(response[json_start:json_end])
- if 'action' not in selection or not isinstance(selection['action'], dict):
- raise ValueError("Selection missing 'action'")
- return selection
-
- async def act_execute(self, context: TaskContext, selection: Dict[str, Any], task_step: TaskStep, workflow, step_index: int) -> ActionResult:
- """Act: request minimal parameters then execute selected action."""
- action = selection.get('action', {})
- prompt_template = createActionParameterPromptTemplate()
-
- # Extract content for placeholders
- user_prompt = extractUserPrompt(context)
- # Use enhanced document context instead of simple summary for React mode
- available_documents = getEnhancedDocumentContext(self.services)
- user_language = extractUserLanguage(self.services)
-
- # Get available connections for React mode
- from modules.workflows.processing.promptFactory import _getConnectionReferenceList
- available_connections = _getConnectionReferenceList(self.services)
- available_connections_str = '\n'.join(f"- {conn}" for conn in available_connections) if available_connections else "No connections available"
-
- # Get action signature
- method = action.get('method', '')
- name = action.get('name', '')
- action_signature = ""
- if self.services and method in methods:
- method_instance = methods[method]['instance']
- action_signature = method_instance.getActionSignature(name)
-
- selected_action = f"{method}.{name}"
-
- # Create placeholders dictionary
- placeholders = {
- "USER_PROMPT": user_prompt,
- "AVAILABLE_DOCUMENTS": available_documents,
- "AVAILABLE_CONNECTIONS": available_connections_str,
- "USER_LANGUAGE": user_language,
- "SELECTED_ACTION": selected_action,
- "ACTION_SIGNATURE": action_signature
- }
-
- self.writeTraceLog("React Parameters Prompt", prompt_template)
- self.writeTraceLog("React Parameters Placeholders", placeholders)
-
- # Centralized AI call for parameter suggestion (balanced analysis)
- options = AiCallOptions(
- operationType=OperationType.ANALYSE_CONTENT,
- priority=Priority.BALANCED,
- compressPrompt=True,
- compressContext=False,
- processingMode=ProcessingMode.ADVANCED,
- maxCost=0.05,
- maxProcessingTime=30
- )
-
- params_resp = await self.services.ai.callAi(
- prompt=prompt_template,
- placeholders=placeholders,
- options=options
- )
- self.writeTraceLog("React Parameters Response", params_resp)
- js = params_resp[params_resp.find('{'):params_resp.rfind('}')+1] if params_resp else '{}'
- try:
- param_obj = json.loads(js)
- except Exception:
- param_obj = {"parameters": {}}
- parameters = param_obj.get('parameters', {}) if isinstance(param_obj, dict) else {}
-
- # Apply minimal defaults in-code (language)
- if 'language' not in parameters and hasattr(self.services, 'user') and getattr(self.services.user, 'language', None):
- parameters['language'] = self.services.user.language
-
- # Build a synthetic TaskAction for execution routing and labels
- current_round = getattr(self.workflow, 'currentRound', 0)
- current_task = getattr(self.workflow, 'currentTask', 0)
- result_label = f"round{current_round}_task{current_task}_action{step_index}_results"
- task_action = self.createTaskAction({
- "execMethod": action.get('method', ''),
- "execAction": action.get('name', ''),
- "execParameters": parameters,
- "execResultLabel": result_label,
- "status": TaskStatus.PENDING
- })
- # Execute using existing single action flow
- return await self.executeSingleAction(task_action, workflow, task_step, current_task, step_index, 1)
-
- def observe_build(self, action_result: ActionResult) -> Dict[str, Any]:
- """Observe: build compact observation object from ActionResult with full document metadata"""
- previews = []
- notes = []
- if action_result and action_result.documents:
- # Process all documents and show full metadata
- for doc in action_result.documents:
- # Extract all available metadata without content
- doc_metadata = {
- "name": getattr(doc, 'documentName', 'Unknown'),
- "mimeType": getattr(doc, 'mimeType', 'Unknown'),
- "size": getattr(doc, 'size', 'Unknown'),
- "created": getattr(doc, 'created', 'Unknown'),
- "modified": getattr(doc, 'modified', 'Unknown'),
- "typeGroup": getattr(doc, 'typeGroup', 'Unknown'),
- "documentId": getattr(doc, 'documentId', 'Unknown'),
- "reference": getattr(doc, 'reference', 'Unknown')
- }
- # Remove 'Unknown' values to keep it clean
- doc_metadata = {k: v for k, v in doc_metadata.items() if v != 'Unknown'}
-
- # Add content size indicator instead of actual content
- if hasattr(doc, 'documentData') and doc.documentData:
- if isinstance(doc.documentData, dict) and 'content' in doc.documentData:
- content_length = len(str(doc.documentData['content']))
- doc_metadata['contentSize'] = f"{content_length} characters"
- else:
- content_length = len(str(doc.documentData))
- doc_metadata['contentSize'] = f"{content_length} characters"
-
- # Extract comment if available
- if hasattr(doc, 'documentData') and doc.documentData:
- data = getattr(doc, 'documentData', None)
- if isinstance(data, dict):
- comment = data.get("comment", "")
- if comment:
- notes.append(f"Document '{doc_metadata.get('name', 'Unknown')}': {comment}")
-
- previews.append(doc_metadata)
-
- observation = {
- "success": bool(action_result.success),
- "resultLabel": action_result.resultLabel or "",
- "documentsCount": len(action_result.documents) if action_result.documents else 0,
- "previews": previews,
- "notes": notes
- }
- return observation
-
- async def refine_decide(self, context: TaskContext, observation: Dict[str, Any]) -> Dict[str, Any]:
- """Refine: decide continue or stop, with reason"""
- prompt_template = createRefinementPromptTemplate()
-
- # Extract content for placeholders
- user_prompt = extractUserPrompt(context)
- review_content = extractReviewContent(type('Context', (), {'observation': observation})())
-
- # Create placeholders dictionary
- placeholders = {
- "USER_PROMPT": user_prompt,
- "REVIEW_CONTENT": review_content
- }
-
- self.writeTraceLog("React Refinement Prompt", prompt_template)
- self.writeTraceLog("React Refinement Placeholders", placeholders)
-
- # Centralized AI call for refinement decision (balanced analysis)
- options = AiCallOptions(
- operationType=OperationType.ANALYSE_CONTENT,
- priority=Priority.BALANCED,
- compressPrompt=True,
- compressContext=False,
- processingMode=ProcessingMode.ADVANCED,
- maxCost=0.05,
- maxProcessingTime=30
- )
-
- resp = await self.services.ai.callAi(
- prompt=prompt_template,
- placeholders=placeholders,
- options=options
- )
- self.writeTraceLog("React Refinement Response", resp)
- js = resp[resp.find('{'):resp.rfind('}')+1] if resp else '{}'
- try:
- decision = json.loads(js)
- except Exception:
- decision = {"decision": "continue", "reason": "default"}
- return decision
-
- async def executeTask(self, task_step, workflow, context, task_index=None, total_tasks=None) -> TaskResult:
- """Execute all actions for a task step, with state management and retries.
- When workflow.workflowMode is 'React', run compact planβactβobserveβrefine loop.
- """
- logger.info(f"=== STARTING TASK {task_index or '?'}: {task_step.objective} ===")
-
- # PHASE 4: Update workflow object before executing task
- # Set currentTask=task_number, currentAction=0, totalActions=0
- if task_index is not None:
- self.updateWorkflowBeforeExecutingTask(task_index)
-
- # Update workflow context for this task
- if task_index is not None:
- self.services.workflow.setWorkflowContext(task_number=task_index)
- # Remove the increment call that causes double-increment bug
-
- # Create database log entry for task start in format expected by frontend
- if task_index is not None:
-
- # Create a task start message for the user
- task_progress = f"{task_index}/{total_tasks}" if total_tasks is not None else str(task_index)
- task_start_message = {
- "workflowId": workflow.id,
- "role": "assistant",
- "message": f"π **Task {task_progress}**",
- "status": "step",
- "sequenceNr": len(workflow.messages) + 1,
- "publishedAt": self.services.utils.getUtcTimestamp(),
- "documentsLabel": f"task_{task_index}_start",
- "documents": [],
- # Add workflow context fields
- "roundNumber": workflow.currentRound, # Use current workflow round
- "taskNumber": task_index,
- "actionNumber": 0,
- # Add task progress status
- "taskProgress": "running"
- }
-
- # Add user-friendly message if available
- if task_step.userMessage:
- task_start_message["message"] += f"\n\n㪠{task_step.userMessage}"
-
- message = self.services.interfaceDbChat.createMessage(task_start_message)
- if message:
- workflow.messages.append(message)
- logger.info(f"Task start message created for task {task_index}")
-
- state = TaskExecutionState(task_step)
- # React mode path - check workflow mode instead of context
- workflow_mode = getattr(context.workflow, 'workflowMode', 'Actionplan') if context.workflow else 'Actionplan'
- logger.info(f"Task execution - workflow mode: {workflow_mode}")
- if isinstance(context, TaskContext) and hasattr(context, 'workflow') and context.workflow and workflow_mode == 'React':
- logger.info(f"Using React mode execution with max_steps: {getattr(context.workflow, 'maxSteps', 5)}")
- state.max_steps = max(1, int(getattr(context.workflow, 'maxSteps', 5)))
- step = 1
- last_review_dict = None
- while step <= state.max_steps:
- self._checkWorkflowStopped()
- # Update workflow[currentAction] for UI
- self.updateWorkflowBeforeExecutingAction(step)
- self.services.workflow.setWorkflowContext(action_number=step)
- try:
- t0 = time.time()
- selection = await self.plan_select(context)
- logger.info(f"React step {step}: Selected action: {selection}")
-
- # Create user-friendly message BEFORE action execution
- await self.createReactActionMessage(workflow, selection, step, state.max_steps, task_index, "before")
-
- result = await self.act_execute(context, selection, task_step, workflow, step)
- observation = self.observe_build(result)
- # Attach deterministic label for clarity
- observation['resultLabel'] = result.resultLabel
- decision = await self.refine_decide(context, observation)
- # Telemetry: simple duration per step
- duration = time.time() - t0
- self.services.interfaceDbChat.createLog({
- "workflowId": workflow.id,
- "message": f"react_step_duration_sec={duration:.3f}",
- "type": "info"
- })
- last_review_dict = decision
-
- # Create user-friendly message AFTER action execution
- await self.createReactActionMessage(workflow, selection, step, state.max_steps, task_index, "after", result, observation)
- except Exception as e:
- logger.error(f"React step {step} error: {e}")
- break
-
- if not should_continue(observation, last_review_dict, step, state.max_steps):
- break
- step += 1
-
- # Summarize task result for react mode
- status = TaskStatus.COMPLETED
- success = True
- feedback = last_review_dict.get('reason') if isinstance(last_review_dict, dict) else 'Completed'
- if isinstance(last_review_dict, dict) and last_review_dict.get('decision') == 'stop':
- success = True
- return TaskResult(
- taskId=task_step.id,
- status=status,
- success=success,
- feedback=feedback,
- error=None if success else feedback
- )
- else:
- # Actionplan mode execution
- logger.info(f"Using Actionplan mode execution")
-
- retry_context = context
- max_retries = state.max_retries
- for attempt in range(max_retries):
- logger.info(f"Task execution attempt {attempt+1}/{max_retries}")
-
- # Check workflow status before starting task execution
- self._checkWorkflowStopped()
-
- # Update retry context with current attempt information
- if retry_context:
- retry_context.retry_count = attempt + 1
-
- actions = await self.generateTaskActions(task_step, workflow, previous_results=retry_context.previous_results, enhanced_context=retry_context)
-
- # Log total actions count for this task
- total_actions = len(actions) if actions else 0
- logger.info(f"Task {task_index or '?'} has {total_actions} actions")
-
- # PHASE 4: Update workflow object after action planning
- # Set totalActions=extracted_total_actions for THIS task
- self.updateWorkflowAfterActionPlanning(total_actions)
-
- # Set workflow action total for this task (0 if no actions generated)
- self.setWorkflowTotals(total_actions=total_actions)
-
- if not actions:
- logger.error("No actions defined for task step, aborting task execution")
- break
-
- action_results = []
- for action_idx, action in enumerate(actions):
- # Check workflow status before each action execution
- self._checkWorkflowStopped()
-
- # PHASE 4: Update workflow object before executing action
- # Set currentAction=action_number
- action_number = action_idx + 1
- self.updateWorkflowBeforeExecutingAction(action_number)
-
- # Update workflow context for this action
- self.services.workflow.setWorkflowContext(action_number=action_number)
- # Remove the increment call that causes double-increment bug
-
- # Log action start in format expected by frontend
- logger.info(f"Task {task_index} - Starting action {action_number}/{total_actions}")
-
- # Create an action start message for the user
- action_start_message = {
- "workflowId": workflow.id,
- "role": "assistant",
- "message": f"β‘ **Action {action_number}/{total_actions}** (Method {action.execMethod}.{action.execAction})",
- "status": "step",
- "sequenceNr": len(workflow.messages) + 1,
- "publishedAt": self.services.utils.getUtcTimestamp(),
- "documentsLabel": f"action_{action_number}_start",
- "documents": [],
- # Add action progress status
- "actionProgress": "running"
- }
-
- # Add user-friendly message if available
- if action.userMessage:
- action_start_message["message"] += f"\n\n㪠{action.userMessage}"
-
- # Add workflow context fields - use current workflow round instead of hardcoded 1
- action_start_message.update({
- "roundNumber": workflow.currentRound, # Use current workflow round
- "taskNumber": task_index,
- "actionNumber": action_number
- })
-
- message = self.services.interfaceDbChat.createMessage(action_start_message)
- if message:
- workflow.messages.append(message)
- logger.info(f"Action start message created for action {action_number}")
-
- # Pass action index to executeSingleAction with task context
- result = await self.executeSingleAction(action, workflow, task_step, task_index, action_number, total_actions)
- action_results.append(result)
- if result.success:
- state.addSuccessfulAction(result)
- else:
- state.addFailedAction(result)
-
- # Check workflow status before review
- self._checkWorkflowStopped()
-
- review_result = await self.reviewTaskCompletion(task_step, actions, action_results, workflow)
- success = review_result.status == 'success'
- feedback = review_result.reason
- error = None if success else review_result.reason
- if success:
- logger.info(f"=== TASK {task_index or '?'} COMPLETED SUCCESSFULLY: {task_step.objective} ===")
-
- # Create a task completion message for the user
- task_progress = f"{task_index}/{total_tasks}" if total_tasks is not None else str(task_index)
-
- # Enhanced completion message with criteria details
- completion_message = f"π― **Task {task_progress}**\n\nβ
{feedback or 'Task completed successfully'}"
-
- # Add criteria status if available
- if hasattr(review_result, 'met_criteria') and review_result.met_criteria:
- for criterion in review_result.met_criteria:
- completion_message += f"\nβ’ {criterion}"
-
- if hasattr(review_result, 'quality_score'):
- completion_message += f"\nπ Score {review_result.quality_score}/10"
-
- task_completion_message = {
- "workflowId": workflow.id,
- "role": "assistant",
- "message": completion_message,
- "status": "step",
- "sequenceNr": len(workflow.messages) + 1,
- "publishedAt": self.services.utils.getUtcTimestamp(),
- "documentsLabel": f"task_{task_index}_completion",
- "documents": [],
- # Add workflow context fields
- "roundNumber": workflow.currentRound, # Use current workflow round
- "taskNumber": task_index,
- "actionNumber": 0,
- # Add task progress status
- "taskProgress": "success"
- }
-
- message = self.services.interfaceDbChat.createMessage(task_completion_message)
- if message:
- workflow.messages.append(message)
- logger.info(f"Task completion message created for task {task_index}")
-
- return TaskResult(
- taskId=task_step.id,
- status=TaskStatus.COMPLETED,
- success=True,
- feedback=feedback,
- error=None
- )
-
- elif review_result.status == 'retry' and state.canRetry():
- logger.warning(f"Task step '{task_step.objective}' requires retry: {review_result.improvements}")
-
- # Enhanced logging of criteria status
- if review_result.met_criteria:
- logger.info(f"Met criteria: {', '.join(review_result.met_criteria)}")
- if review_result.unmet_criteria:
- logger.warning(f"Unmet criteria: {', '.join(review_result.unmet_criteria)}")
-
- state.incrementRetryCount()
-
- # Update retry context with retry information and criteria tracking
- if retry_context:
- retry_context.retry_count = state.retry_count
- retry_context.improvements = review_result.improvements
- retry_context.previous_action_results = action_results
- retry_context.previous_review_result = review_result
- retry_context.is_regeneration = True
- retry_context.failure_patterns = state.getFailurePatterns()
- retry_context.failed_actions = state.failed_actions
- retry_context.successful_actions = state.successful_actions
-
- # Track criteria progress across retries
- if not hasattr(retry_context, 'criteria_progress'):
- retry_context.criteria_progress = {
- 'met_criteria': set(),
- 'unmet_criteria': set(),
- 'attempt_history': []
- }
-
- # Update criteria progress - convert lists to sets for deduplication
- if review_result.met_criteria:
- retry_context.criteria_progress['met_criteria'].update(review_result.met_criteria)
- if review_result.unmet_criteria:
- retry_context.criteria_progress['unmet_criteria'].update(review_result.unmet_criteria)
-
- # Record this attempt's criteria status
- attempt_record = {
- 'attempt': state.retry_count,
- 'met_criteria': review_result.met_criteria or [],
- 'unmet_criteria': review_result.unmet_criteria or [],
- 'quality_score': review_result.quality_score,
- 'improvements': review_result.improvements or []
- }
- retry_context.criteria_progress['attempt_history'].append(attempt_record)
-
- logger.info(f"Criteria progress after {state.retry_count} attempts:")
- logger.info(f" Total met: {len(retry_context.criteria_progress['met_criteria'])}")
- logger.info(f" Total unmet: {len(retry_context.criteria_progress['unmet_criteria'])}")
- if retry_context.criteria_progress['met_criteria']:
- logger.info(f" Met criteria: {', '.join(retry_context.criteria_progress['met_criteria'])}")
- if retry_context.criteria_progress['unmet_criteria']:
- logger.info(f" Unmet criteria: {', '.join(retry_context.criteria_progress['unmet_criteria'])}")
-
- # Log retry summary for debugging
- logger.info(f"=== RETRY #{state.retry_count} SUMMARY ===")
- logger.info(f"Task: {task_step.objective}")
- logger.info(f"Quality Score: {review_result.quality_score}/10")
- logger.info(f"Status: {review_result.status}")
- logger.info(f"Improvements Needed: {review_result.improvements}")
- logger.info(f"Reason: {review_result.reason}")
- logger.info("=== END RETRY SUMMARY ===")
-
- # Create retry message for user
- retry_message = {
- "workflowId": workflow.id,
- "role": "assistant",
- "message": f"π **Task {task_index}** needs retry: {review_result.improvements}",
- "status": "step",
- "sequenceNr": len(workflow.messages) + 1,
- "publishedAt": self.services.utils.getUtcTimestamp(),
- "documentsLabel": f"task_{task_index}_retry",
- "documents": [],
- "roundNumber": workflow.currentRound,
- "taskNumber": task_index,
- "actionNumber": 0,
- "taskProgress": "retry"
- }
-
- message = self.services.interfaceDbChat.createMessage(retry_message)
- if message:
- workflow.messages.append(message)
-
- continue
- else:
- logger.error(f"=== TASK {task_index or '?'} FAILED: {task_step.objective} after {attempt+1} attempts ===")
- task_progress = f"{task_index}/{total_tasks}" if total_tasks is not None else str(task_index)
-
- # Create user-facing error message for task failure
- error_message = f"**Task {task_progress}**\n\nβ '{task_step.objective}' {attempt+1}x failed\n\n"
-
- # Add specific error details if available
- if review_result and hasattr(review_result, 'reason') and review_result.reason:
- error_message += f"{review_result.reason}\n\n"
-
- # Add criteria progress information if available
- if retry_context and hasattr(retry_context, 'criteria_progress'):
- progress = retry_context.criteria_progress
- error_message += f"π **Details**\n"
- if progress.get('met_criteria'):
- error_message += f"β
Met criteria: {', '.join(progress['met_criteria'])}\n"
- if progress.get('unmet_criteria'):
- error_message += f"β Unmet criteria: {', '.join(progress['unmet_criteria'])}\n"
- error_message += "\n"
-
- # Add retry information
- error_message += f"Attempts: {attempt+1}\n"
- error_message += f"Status: Will retry automatically\n\n"
- error_message += "The system will attempt to retry this task. Please wait..."
-
- # Create workflow message for user
- message_data = {
- "workflowId": workflow.id,
- "role": "assistant",
- "message": error_message,
- "status": "step",
- "sequenceNr": len(workflow.messages) + 1,
- "publishedAt": self.services.utils.getUtcTimestamp(),
- "actionId": None,
- "actionMethod": "task",
- "actionName": "task_retry",
- "documentsLabel": None,
- "documents": [],
- # Add workflow context fields
- "roundNumber": workflow.currentRound, # Use current workflow round
- "taskNumber": task_index,
- "actionNumber": 0,
- # Add task progress status
- "taskProgress": "retry"
- }
-
- try:
- message = self.services.interfaceDbChat.createMessage(message_data)
- if message:
- workflow.messages.append(message)
- logger.info(f"Created user-facing retry message for failed task: {task_step.objective}")
- else:
- logger.error(f"Failed to create user-facing retry message for failed task: {task_step.objective}")
- except Exception as e:
- logger.error(f"Error creating user-facing retry message: {str(e)}")
-
- return TaskResult(
- taskId=task_step.id,
- status=TaskStatus.FAILED,
- success=False,
- feedback=feedback,
- error=review_result.reason if review_result and hasattr(review_result, 'reason') else "Task failed after retry attempts"
- )
- logger.error(f"=== TASK {task_index or '?'} FAILED AFTER ALL RETRIES: {task_step.objective} ===")
-
- # Create user-facing error message for task failure
- error_message = f"**Task {task_index or '?'}**\n\nβ '{task_step.objective}' failed after all retries\n\n"
- error_message += f"{task_step.objective}\n\n"
-
- # Add specific error details if available
- if retry_context and hasattr(retry_context, 'previous_review_result') and retry_context.previous_review_result:
- reason = retry_context.previous_review_result.reason or ''
- if reason and reason != "Task failed after all retries.":
- error_message += f"{reason}\n\n"
-
- # Add retry information
- error_message += f"Retries attempted: {retry_context.retry_count if retry_context else 'Unknown'}\n"
- error_message += f"Status: Task failed permanently"
-
- # Create workflow message for user
- message_data = {
- "workflowId": workflow.id,
- "role": "assistant",
- "message": error_message,
- "status": "step",
- "sequenceNr": len(workflow.messages) + 1,
- "publishedAt": self.services.utils.getUtcTimestamp(),
- "actionId": None,
- "actionMethod": "task",
- "actionName": "task_failure",
- "documentsLabel": None,
- "documents": [],
- # Add workflow context fields
- "roundNumber": workflow.currentRound, # Use current workflow round
- "taskNumber": task_index,
- "actionNumber": 0,
- # NEW: Add task progress status
- "taskProgress": "fail"
- }
-
- try:
- message = self.services.interfaceDbChat.createMessage(message_data)
- if message:
- workflow.messages.append(message)
- logger.info(f"Created user-facing error message for failed task: {task_step.objective}")
- else:
- logger.error(f"Failed to create user-facing error message for failed task: {task_step.objective}")
- except Exception as e:
- logger.error(f"Error creating user-facing error message: {str(e)}")
-
- return TaskResult(
- taskId=task_step.id,
- status=TaskStatus.FAILED,
- success=False,
- feedback="Task failed after all retries.",
- error="Task failed after all retries."
- )
-
- async def reviewTaskCompletion(self, task_step, task_actions, action_results, workflow):
- try:
- # Check workflow status before reviewing task completion
- self._checkWorkflowStopped()
-
- logger.info(f"=== STARTING TASK COMPLETION REVIEW ===")
- logger.info(f"Task: {task_step.objective}")
- logger.info(f"Actions executed: {len(task_actions) if task_actions else 0}")
- logger.info(f"Action results: {len(action_results) if action_results else 0}")
-
- # Create proper context object for result review
- review_context = ReviewContext(
- task_step=task_step,
- task_actions=task_actions,
- action_results=action_results,
- step_result={
- 'successful_actions': sum(1 for result in action_results if result.success),
- 'total_actions': len(action_results),
- 'results': [self._extractResultText(result) for result in action_results if result.success],
- 'errors': [result.error for result in action_results if not result.success],
- 'documents': [
- {
- 'action_index': i,
- 'documents_count': len(result.documents) if result.documents else 0,
- 'documents': result.documents if result.documents else []
- }
- for i, result in enumerate(action_results)
- ]
- },
- workflow_id=workflow.id,
- previous_results=[]
- )
-
- # Check workflow status before calling AI service
- self._checkWorkflowStopped()
-
- # Use placeholder-based review prompt
- prompt_template = createResultReviewPromptTemplate()
-
- # Extract content for placeholders
- user_prompt = extractUserPrompt(review_context)
- review_content = extractReviewContent(review_context)
-
- # Create placeholders dictionary
- placeholders = {
- "USER_PROMPT": user_prompt,
- "REVIEW_CONTENT": review_content
- }
-
- # Log result review prompt sent to AI
- logger.info("=== RESULT REVIEW PROMPT SENT TO AI ===")
- logger.info(f"Task: {task_step.objective}")
- logger.info(f"Action Results Count: {len(review_context.action_results) if review_context.action_results else 0}")
- logger.info(f"Task Actions Count: {len(review_context.task_actions) if review_context.task_actions else 0}")
- # Trace result review prompt
- self.writeTraceLog("Result Review Prompt", prompt_template)
- self.writeTraceLog("Result Review Placeholders", placeholders)
-
- # Centralized AI call: Result validation (balanced analysis) with placeholders
- options = AiCallOptions(
- operationType=OperationType.ANALYSE_CONTENT,
- priority=Priority.BALANCED,
- compressPrompt=True,
- compressContext=False,
- processingMode=ProcessingMode.ADVANCED,
- maxCost=0.05,
- maxProcessingTime=30
- )
-
- response = await self.services.ai.callAi(
- prompt=prompt_template,
- placeholders=placeholders,
- options=options
- )
-
- # Log result review response received
- logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")
- logger.info(f"Response length: {len(response) if response else 0}")
- # Trace result review response
- self.writeTraceLog("Result Review Response", response)
-
- # Inline parseReviewResponse logic here
- json_start = response.find('{')
- json_end = response.rfind('}') + 1
- if json_start == -1 or json_end == 0:
- raise ValueError("No JSON found in review response")
- json_str = response[json_start:json_end]
-
- try:
- review = json.loads(json_str)
- except Exception as e:
- logger.error(f"Error parsing review response JSON: {str(e)}")
- review = {}
- if 'status' not in review:
- raise ValueError("Review response missing 'status' field")
- review.setdefault('status', 'unknown')
- review.setdefault('reason', 'No reason provided')
- review.setdefault('quality_score', 5)
-
- # Ensure improvements is a list
- improvements = review.get('improvements', [])
- if isinstance(improvements, str):
- # Split string into list if it's a single improvement
- improvements = [improvements.strip()] if improvements.strip() else []
- elif not isinstance(improvements, list):
- improvements = []
-
- # Ensure all list fields are properly typed
- met_criteria = review.get('met_criteria', [])
- if not isinstance(met_criteria, list):
- met_criteria = []
-
- unmet_criteria = review.get('unmet_criteria', [])
- if not isinstance(unmet_criteria, list):
- unmet_criteria = []
-
- review_result = ReviewResult(
- status=review.get('status', 'unknown'),
- reason=review.get('reason', 'No reason provided'),
- improvements=improvements,
- quality_score=review.get('quality_score', 5),
- missing_outputs=[],
- met_criteria=met_criteria,
- unmet_criteria=unmet_criteria,
- confidence=review.get('confidence', 0.5),
- # Extract user-friendly message if available
- userMessage=review.get('userMessage', None)
- )
-
- # Enhanced validation logging
- logger.info(f"VALIDATION RESULT - Task: '{task_step.objective}' - Status: {review_result.status.upper()}, Quality: {review_result.quality_score}/10")
- if review_result.status == 'success':
- logger.info(f"VALIDATION SUCCESS - Task completed successfully")
- if review_result.met_criteria:
- logger.info(f"Met criteria: {', '.join(review_result.met_criteria)}")
- elif review_result.status == 'retry':
- logger.warning(f"VALIDATION RETRY - Task requires retry: {review_result.improvements}")
- if review_result.unmet_criteria:
- logger.warning(f"Unmet criteria: {', '.join(review_result.unmet_criteria)}")
- else:
- logger.error(f"VALIDATION FAILED - Task failed: {review_result.reason}")
-
- logger.info(f"=== TASK COMPLETION REVIEW FINISHED ===")
- logger.info(f"Final Status: {review_result.status}")
- logger.info(f"Quality Score: {review_result.quality_score}/10")
- logger.info(f"Improvements: {review_result.improvements}")
- logger.info("=== END REVIEW ===")
-
- return review_result
- except Exception as e:
- logger.error(f"Error in reviewTaskCompletion: {str(e)}")
- return ReviewResult(
- status='failed',
- reason=str(e),
- quality_score=0
- )
-
- async def prepareTaskHandover(self, task_step, task_actions, task_result, workflow):
- try:
- # Check workflow status before preparing task handover
- self._checkWorkflowStopped()
-
- # Log handover status summary
- status = task_result.status if task_result else 'unknown'
-
- # Handle both TaskResult and ReviewResult objects
- if hasattr(task_result, 'met_criteria'):
- # This is a ReviewResult object
- met = task_result.met_criteria if task_result.met_criteria else []
- review_result = task_result.to_dict()
- else:
- # This is a TaskResult object
- met = []
- review_result = {
- 'status': task_result.status if task_result else 'unknown',
- 'reason': task_result.error if task_result and hasattr(task_result, 'error') else None,
- 'success': task_result.success if task_result else False
- }
-
- handover_data = {
- 'task_id': task_step.id,
- 'task_description': task_step.objective,
- 'actions': [action.to_dict() for action in task_actions],
- 'review_result': review_result,
- 'workflow_id': workflow.id,
- 'handover_time': self.services.utils.getUtcTimestamp()
- }
- logger.info(f"Prepared handover for task {task_step.id} in workflow {workflow.id}")
- return handover_data
- except Exception as e:
- logger.error(f"Error in prepareTaskHandover: {str(e)}")
- return {'error': str(e)}
-
- def createTaskAction(self, actionData: Dict[str, Any]) -> 'TaskAction':
- """Creates a new task action."""
- try:
- # Ensure ID is present
- if "id" not in actionData or not actionData["id"]:
- actionData["id"] = f"action_{uuid.uuid4()}"
-
- # Ensure required fields
- if "status" not in actionData:
- actionData["status"] = TaskStatus.PENDING
-
- if "execMethod" not in actionData:
- logger.error("execMethod is required for task action")
- return None
-
- if "execAction" not in actionData:
- logger.error("execAction is required for task action")
- return None
-
- if "execParameters" not in actionData:
- actionData["execParameters"] = {}
-
- # Use generic field separation based on TaskAction model
- simple_fields, object_fields = self.services.interfaceDbChat._separate_object_fields(TaskAction, actionData)
-
- # Create action in database
- createdAction = self.services.interfaceDbChat.db.recordCreate(TaskAction, simple_fields)
-
- # Convert to TaskAction model
- return TaskAction(
- id=createdAction["id"],
- execMethod=createdAction["execMethod"],
- execAction=createdAction["execAction"],
- execParameters=createdAction.get("execParameters", {}),
- execResultLabel=createdAction.get("execResultLabel"),
- expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
- status=createdAction.get("status", TaskStatus.PENDING),
- error=createdAction.get("error"),
- retryCount=createdAction.get("retryCount", 0),
- retryMax=createdAction.get("retryMax", 3),
- processingTime=createdAction.get("processingTime"),
- timestamp=float(createdAction.get("timestamp", self.services.utils.getUtcTimestamp())),
- result=createdAction.get("result"),
- resultDocuments=createdAction.get("resultDocuments", []),
- userMessage=createdAction.get("userMessage")
- )
-
- except Exception as e:
- logger.error(f"Error creating task action: {str(e)}")
- return None
-
- # --- Helper action handling methods ---
-
- async def executeSingleAction(self, action, workflow, task_step, task_index=None, action_index=None, total_actions=None):
- """Execute a single action and return ActionResult with enhanced document processing"""
- try:
- # Check workflow status before executing action
- self._checkWorkflowStopped()
-
- # Use passed indices or fallback to '?'
- task_num = task_index if task_index is not None else '?'
- action_num = action_index if action_index is not None else '?'
-
- logger.info(f"=== TASK {task_num} ACTION {action_num}: {action.execMethod}.{action.execAction} ===")
-
- # Log input parameters
- input_docs = action.execParameters.get('documentList', [])
- input_connections = action.execParameters.get('connections', [])
- logger.info(f"Input documents: {input_docs} (type: {type(input_docs)})")
- if input_connections:
- logger.info(f"Input connections: {input_connections}")
-
- # Log all action parameters for debugging
- logger.info(f"All action parameters: {action.execParameters}")
-
- enhanced_parameters = action.execParameters.copy()
- if action.expectedDocumentFormats:
- enhanced_parameters['expectedDocumentFormats'] = action.expectedDocumentFormats
- logger.info(f"Expected formats: {action.expectedDocumentFormats}")
-
- # Check workflow status before executing the action
- self._checkWorkflowStopped()
-
- result = await self.executeAction(
- methodName=action.execMethod,
- actionName=action.execAction,
- parameters=enhanced_parameters
- )
- result_label = action.execResultLabel
-
- # Trace action result with full document metadata
- action_result_trace = {
- "method": action.execMethod,
- "action": action.execAction,
- "success": result.success,
- "error": result.error,
- "resultLabel": result_label,
- "documentsCount": len(result.documents) if result.documents else 0
- }
-
- # Add full document metadata if documents exist
- if result.documents:
- action_result_trace["documents"] = []
- for doc in result.documents:
- doc_metadata = {
- "name": getattr(doc, 'documentName', 'Unknown'),
- "mimeType": getattr(doc, 'mimeType', 'Unknown'),
- "size": getattr(doc, 'size', 'Unknown'),
- "created": getattr(doc, 'created', 'Unknown'),
- "modified": getattr(doc, 'modified', 'Unknown'),
- "typeGroup": getattr(doc, 'typeGroup', 'Unknown'),
- "documentId": getattr(doc, 'documentId', 'Unknown'),
- "reference": getattr(doc, 'reference', 'Unknown')
- }
- # Remove 'Unknown' values to keep it clean
- doc_metadata = {k: v for k, v in doc_metadata.items() if v != 'Unknown'}
- action_result_trace["documents"].append(doc_metadata)
-
- self.writeTraceLog("Action Result", action_result_trace)
-
- # Process documents from the action result
- created_documents = []
- if result.success:
- action.setSuccess()
- # Extract result text from ALL documents using generation service
- action.result = self._extractResultText(result)
- # Preserve the action's execResultLabel for document routing
- # Action methods should NOT return resultLabel - this is managed by the action handler
- if not action.execResultLabel:
- logger.warning(f"Action {action.execMethod}.{action.execAction} has no execResultLabel set")
- # Always use the action's execResultLabel for message creation to ensure proper document routing
- message_result_label = action.execResultLabel
-
- # Create documents first, then create the message with documents attached in one write
- created_documents = self.services.generation.createDocumentsFromActionResult(result, action, workflow, None)
- message = await self.createActionMessage(
- action,
- result,
- workflow,
- message_result_label,
- created_documents,
- task_step,
- task_index,
- action_index,
- total_actions
- )
-
- # Log action results
- logger.info(f"Action completed successfully")
-
- if created_documents:
- logger.info(f"Output documents ({len(created_documents)}):")
- for i, doc in enumerate(created_documents):
- logger.info(f" {i+1}. {doc.fileName}")
-
- # Log document details for debugging
- logger.info("Document details:")
- for i, doc in enumerate(created_documents):
- logger.info(f" Doc {i+1}: fileName={doc.fileName}, type={type(doc)}")
- logger.info(f" ID: {doc.id}")
- logger.info(f" File ID: {doc.fileId}")
- else:
- logger.info("Output: No documents created")
- else:
- action.setError(result.error or "Action execution failed")
- logger.error(f"Action failed: {result.error}")
-
- # ⚠️ IMPORTANT: Create error message for failed actions so user can see what went wrong
- message = await self.createActionMessage(action, result, workflow, result_label, [], task_step, task_index, action_index, total_actions)
-
- # Create database log entry for action failure
- self.services.interfaceDbChat.createLog({
- "workflowId": workflow.id,
- "message": f"❌ **Task {task_num}**\n\n❌ **Action {action_num}/{total_actions}** failed: {result.error}",
- "type": "error"
- })
-
- # Log action summary
- logger.info(f"=== TASK {task_num} ACTION {action_num} COMPLETED ===")
-
- # Preserve the original documents field from the method result
- # This ensures the standard document format is maintained
- original_documents = result.documents
-
- # Extract result text from documents if available
- result_text = self._extractResultText(result)
-
- return ActionResult(
- success=result.success,
- documents=original_documents, # Preserve original documents field from method result
- resultLabel=action.execResultLabel, # Always use action's execResultLabel
- error=result.error or ""
- )
- except Exception as e:
- logger.error(f"Error executing single action: {str(e)}")
- action.setError(str(e))
- return ActionResult(
- success=False,
- documents=[], # Empty documents for error case
- resultLabel=action.execResultLabel,
- error=str(e)
- )
-
- async def createActionMessage(self, action, result, workflow, result_label=None, created_documents=None, task_step=None, task_index=None, action_index=None, total_actions=None):
- """Create and store a message for the action result in the workflow with enhanced document processing"""
- try:
- # Check workflow status before creating action message
- self._checkWorkflowStopped()
-
- if result_label is None:
- result_label = action.execResultLabel
-
- # Log delivered documents
- if created_documents:
- logger.info(f"Result label: {result_label} - {len(created_documents)} documents")
- else:
- logger.info(f"Result label: {result_label} - No documents")
-
- # Get current workflow context and stats
- workflow_context = self.services.workflow.getWorkflowContext()
- workflow_stats = self.services.workflow.getWorkflowStats()
-
- # Create a more meaningful message that includes task context
- task_objective = task_step.objective if task_step else 'Unknown task'
-
- # Extract round, task, and action numbers from result_label first, then fallback to workflow context
- current_round = self._extractRoundNumberFromLabel(result_label) if result_label else workflow_context.get('currentRound', 0)
- current_task = self._extractTaskNumberFromLabel(result_label) if result_label else (task_index if task_index is not None else workflow_context.get('currentTask', 0))
- total_tasks = workflow_stats.get('totalTasks', 0)
- current_action = self._extractActionNumberFromLabel(result_label) if result_label else (action_index if action_index is not None else workflow_context.get('currentAction', 0))
- total_actions = total_actions if total_actions is not None else workflow_stats.get('totalActions', 0)
-
- # Debug logging for round number extraction
- logger.info(f"Action message round number extraction: result_label='{result_label}', extracted_round={current_round}, workflow_round={workflow_context.get('currentRound', 0)}")
-
- # Build a user-friendly message based on success/failure
- if result.success:
- message_text = f"**Action {current_action}/{total_actions} ({action.execMethod}.{action.execAction})**\n\n"
- message_text += f"✅ {task_objective}\n\n"
- else:
- # ⚠️ FAILURE MESSAGE - Show error details to user
- error_details = result.error if result.error else "Unknown error occurred"
- message_text = f"**Action {current_action}/{total_actions} ({action.execMethod}.{action.execAction})**\n\n"
- message_text += f"❌ {task_objective}\n\n"
- message_text += f"{error_details}\n\n"
-
- message_data = {
- "workflowId": workflow.id,
- "role": "assistant",
- "message": message_text,
- "status": "step",
- "sequenceNr": len(workflow.messages) + 1,
- "publishedAt": self.services.utils.getUtcTimestamp(),
- "actionId": action.id,
- "actionMethod": action.execMethod,
- "actionName": action.execAction,
- "documentsLabel": result_label,
- "documents": created_documents,
- # Add workflow context fields - extract from result_label to match document reference
- "roundNumber": current_round,
- "taskNumber": current_task,
- "actionNumber": current_action,
- "actionProgress": "success" if result.success else "fail"
- }
-
- # Add debugging for error messages
- if not result.success:
- logger.info(f"Creating ERROR message: {message_text}")
- logger.info(f"Message data: {message_data}")
-
- message = self.services.interfaceDbChat.createMessage(message_data)
- if message:
- workflow.messages.append(message)
- logger.info(f"Message created: {action.execMethod}.{action.execAction}")
- return message
- else:
- logger.error(f"Failed to create workflow message for action {action.execMethod}.{action.execAction}")
- return None
- except Exception as e:
- logger.error(f"Error creating action message: {str(e)}")
- return None
-
- # --- Helper validation methods ---
-
- def _validateTaskPlan(self, task_plan: Dict[str, Any]) -> bool:
- try:
-
-
- if not isinstance(task_plan, dict):
- logger.error("Task plan is not a dictionary")
- return False
-
- if 'tasks' not in task_plan or not isinstance(task_plan['tasks'], list):
- logger.error(f"Task plan missing 'tasks' field or not a list. Found: {type(task_plan.get('tasks', 'MISSING'))}")
- return False
-
- # First pass: collect all task IDs to validate dependencies
- task_ids = set()
- for task in task_plan['tasks']:
- if not isinstance(task, dict):
- logger.error(f"Task is not a dictionary: {type(task)}")
- return False
- if 'id' not in task:
- logger.error(f"Task missing 'id' field: {task}")
- return False
- task_ids.add(task['id'])
-
- # Second pass: validate each task
- for i, task in enumerate(task_plan['tasks']):
-
-
- if not isinstance(task, dict):
- logger.error(f"Task {i} is not a dictionary: {type(task)}")
- return False
-
- required_fields = ['id', 'objective', 'success_criteria']
- missing_fields = [field for field in required_fields if field not in task]
- if missing_fields:
- logger.error(f"Task {i} missing required fields: {missing_fields}")
- return False
-
- # Check for duplicate IDs (shouldn't happen after first pass, but safety check)
- if task['id'] in task_ids and list(task_plan['tasks']).count(task['id']) > 1:
- logger.error(f"Task {i} has duplicate ID: {task['id']}")
- return False
-
- dependencies = task.get('dependencies', [])
- if not isinstance(dependencies, list):
- logger.error(f"Task {i} dependencies is not a list: {type(dependencies)}")
- return False
-
- for dep in dependencies:
- if dep not in task_ids and dep != 'task_0':
- logger.error(f"Task {i} has invalid dependency: {dep} (available: {list(task_ids) + ['task_0']})")
- return False
-
- logger.info(f"Task plan validation successful with {len(task_ids)} tasks")
- return True
-
- except Exception as e:
- logger.error(f"Error validating task plan: {str(e)}")
- return False
-
- def _extractRoundNumberFromLabel(self, label: str) -> int:
- """Extract round number from a document label like 'round1_task1_action1_diagram_analysis'"""
- try:
- if not label or not isinstance(label, str):
- return 0
-
- # Parse label format: round{round}_task{task}_action{action}_{context}
- if label.startswith('round'):
- round_part = label.split('_')[0] # Get 'round1' part
- if round_part.startswith('round'):
- round_number = round_part[5:] # Remove 'round' prefix
- return int(round_number)
-
- return 0
- except Exception as e:
- logger.warning(f"Could not extract round number from label '{label}': {str(e)}")
- return 0
-
- def _extractTaskNumberFromLabel(self, label: str) -> int:
- """Extract task number from a document label like 'round1_task1_action1_diagram_analysis'"""
- try:
- if not label or not isinstance(label, str):
- return 0
-
- # Parse label format: round{round}_task{task}_action{action}_{context}
- if '_task' in label:
- task_part = label.split('_task')[1]
- if task_part and '_' in task_part:
- task_number = task_part.split('_')[0]
- return int(task_number)
-
- return 0
- except Exception as e:
- logger.warning(f"Could not extract task number from label '{label}': {str(e)}")
- return 0
-
- def _extractActionNumberFromLabel(self, label: str) -> int:
- """Extract action number from a document label like 'round1_task1_action1_diagram_analysis'"""
- try:
- if not label or not isinstance(label, str):
- return 0
-
- # Parse label format: round{round}_task{task}_action{action}_{context}
- if '_action' in label:
- action_part = label.split('_action')[1]
- if action_part and '_' in action_part:
- action_number = action_part.split('_')[0]
- return int(action_number)
-
- return 0
- except Exception as e:
- logger.warning(f"Could not extract action number from label '{label}': {str(e)}")
- return 0
-
- def _validateActions(self, actions: List[Dict[str, Any]], context) -> bool:
- try:
- if not isinstance(actions, list):
- logger.error("Actions must be a list")
- return False
- if len(actions) == 0:
- logger.warning("No actions generated")
- return False
- for i, action in enumerate(actions):
- if not isinstance(action, dict):
- logger.error(f"Action {i} must be a dictionary")
- return False
- required_fields = ['method', 'action', 'parameters', 'resultLabel']
- missing_fields = []
- for field in required_fields:
- if field not in action or not action[field]:
- missing_fields.append(field)
- if missing_fields:
- logger.error(f"Action {i} missing required fields: {missing_fields}")
- return False
- result_label = action.get('resultLabel', '')
- if not result_label.startswith('round'):
- logger.error(f"Action {i} result label must start with 'round': {result_label}")
- return False
- parameters = action.get('parameters', {})
- if not isinstance(parameters, dict):
- logger.error(f"Action {i} parameters must be a dictionary")
- return False
- logger.info(f"Successfully validated {len(actions)} actions")
- return True
- except Exception as e:
- logger.error(f"Error validating actions: {str(e)}")
- return False
-
- def _extractResultText(self, result: ActionResult) -> str:
- """Extract result text from ActionResult documents"""
- if not result.success or not result.documents:
- return ""
-
- # Extract text directly from ActionDocument objects
- result_parts = []
- for doc in result.documents:
- if hasattr(doc, 'documentData') and doc.documentData:
- result_parts.append(str(doc.documentData))
-
- # Join all document results with separators
- return "\n\n---\n\n".join(result_parts) if result_parts else ""
-
- # PHASE 4: Workflow Object Update Rules Implementation
-
- def updateWorkflowAfterTaskPlanCreated(self, total_tasks: int):
- """
- Update workflow object after task plan is created.
- Rule: Set currentTask=1, currentAction=0, totalTasks=extracted_total_tasks, totalActions=0
- """
- try:
- update_data = {
- "currentTask": 1,
- "currentAction": 0,
- "totalTasks": total_tasks,
- "totalActions": 0
- }
-
- # Update workflow object
- self.workflow.currentTask = 1
- self.workflow.currentAction = 0
- self.workflow.totalTasks = total_tasks
- self.workflow.totalActions = 0
-
- # Update in database
- self.services.interfaceDbChat.updateWorkflow(self.workflow.id, update_data)
- logger.info(f"Updated workflow {self.workflow.id} after task plan created: {update_data}")
-
- except Exception as e:
- logger.error(f"Error updating workflow after task plan created: {str(e)}")
-
- def updateWorkflowBeforeExecutingTask(self, task_number: int):
- """
- Update workflow object before executing a task.
- Rule: Set currentTask=task_number, currentAction=0, totalActions=0
- """
- try:
- update_data = {
- "currentTask": task_number,
- "currentAction": 0,
- "totalActions": 0
- }
-
- # Update workflow object
- self.workflow.currentTask = task_number
- self.workflow.currentAction = 0
- self.workflow.totalActions = 0
-
- # Update in database
- self.services.interfaceDbChat.updateWorkflow(self.workflow.id, update_data)
- logger.info(f"Updated workflow {self.workflow.id} before executing task {task_number}: {update_data}")
-
- except Exception as e:
- logger.error(f"Error updating workflow before executing task: {str(e)}")
-
- def updateWorkflowAfterActionPlanning(self, total_actions: int):
- """
- Update workflow object after action planning for current task.
- Rule: Set totalActions=extracted_total_actions for THIS task
- """
- try:
- update_data = {
- "totalActions": total_actions
- }
-
- # Update workflow object
- self.workflow.totalActions = total_actions
-
- # Update in database
- self.services.interfaceDbChat.updateWorkflow(self.workflow.id, update_data)
- logger.info(f"Updated workflow {self.workflow.id} after action planning: {update_data}")
-
- except Exception as e:
- logger.error(f"Error updating workflow after action planning: {str(e)}")
-
- def updateWorkflowBeforeExecutingAction(self, action_number: int):
- """
- Update workflow object before executing an action.
- Rule: Set currentAction=action_number
- """
- try:
- update_data = {
- "currentAction": action_number
- }
-
- # Update workflow object
- self.workflow.currentAction = action_number
-
- # Update in database
- self.services.interfaceDbChat.updateWorkflow(self.workflow.id, update_data)
- logger.info(f"Updated workflow {self.workflow.id} before executing action {action_number}: {update_data}")
-
- except Exception as e:
- logger.error(f"Error updating workflow before executing action: {str(e)}")
-
- def setWorkflowTotals(self, total_tasks: int = None, total_actions: int = None):
- """Set total counts for workflow progress tracking and update database"""
- try:
- update_data = {}
-
- if total_tasks is not None:
- self.workflow.totalTasks = total_tasks
- update_data["totalTasks"] = total_tasks
-
- if total_actions is not None:
- self.workflow.totalActions = total_actions
- update_data["totalActions"] = total_actions
-
- # Update workflow object in database if we have changes
- if update_data:
- self.services.interfaceDbChat.updateWorkflow(self.workflow.id, update_data)
- logger.info(f"Updated workflow {self.workflow.id} totals in database: {update_data}")
-
- logger.debug(f"Updated workflow totals: Tasks {self.workflow.totalTasks if hasattr(self.workflow, 'totalTasks') else 'N/A'}, Actions {self.workflow.totalActions if hasattr(self.workflow, 'totalActions') else 'N/A'}")
- except Exception as e:
- logger.error(f"Error setting workflow totals: {str(e)}")
-
- def resetWorkflowForNewSession(self):
- """Reset workflow values for a new workflow session"""
- try:
- # Reset all workflow progress values to initial state
- self.workflow.currentRound = 0
- self.workflow.currentTask = 0
- self.workflow.currentAction = 0
- self.workflow.totalTasks = 0
- self.workflow.totalActions = 0
- self.workflow.status = 'ready'
-
- # Update workflow object in database with reset values
- self.services.interfaceDbChat.updateWorkflow(self.workflow.id, {
- "currentRound": 0,
- "currentTask": 0,
- "currentAction": 0,
- "totalTasks": 0,
- "totalActions": 0,
- "status": "ready"
- })
-
- logger.info("Workflow reset for new session - all values set to initial state and updated in database")
- except Exception as e:
- logger.error(f"Error resetting workflow for new session: {str(e)}")
-
- # ===== Functions moved from serviceCenter =====
-
- async def executeAction(self, methodName: str, actionName: str, parameters: Dict[str, Any]) -> ActionResult:
- """Execute a method action"""
- try:
- if methodName not in methods:
- raise ValueError(f"Unknown method: {methodName}")
-
- method = methods[methodName]
- if actionName not in method['actions']:
- raise ValueError(f"Unknown action: {actionName} for method {methodName}")
-
- action = method['actions'][actionName]
-
- # Execute the action
- return await action['method'](parameters)
-
- except Exception as e:
- logger.error(f"Error executing method {methodName}.{actionName}: {str(e)}")
- raise
-
- def writeTraceLog(self, contextText: str, data: Any) -> None:
- """Write trace data to configured trace file if in debug mode"""
- try:
- import logging
- import os
- from datetime import datetime, UTC
-
- # Only write if logger is in debug mode
- if logger.level > logging.DEBUG:
- return
-
- # Get log directory from configuration
- logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
- if not os.path.isabs(logDir):
- # If relative path, make it relative to the gateway directory
- gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
- logDir = os.path.join(gatewayDir, logDir)
-
- # Ensure log directory exists
- os.makedirs(logDir, exist_ok=True)
-
- # Create trace file path
- trace_file = os.path.join(logDir, "log_trace.log")
-
- # Format the trace entry
- timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
- trace_entry = f"[{timestamp}] {contextText}\n"
-
- # Add data if provided - show full content without truncation
- if data is not None:
- if isinstance(data, (dict, list)):
- import json
- # Use ensure_ascii=False to preserve Unicode characters and indent=2 for readability
- trace_entry += f"Data: {json.dumps(data, indent=2, default=str, ensure_ascii=False)}\n"
- else:
- # For string data, show full content without truncation
- trace_entry += f"Data: {str(data)}\n"
-
- trace_entry += "-" * 80 + "\n\n"
-
- # Write to trace file
- with open(trace_file, "a", encoding="utf-8") as f:
- f.write(trace_entry)
-
- except Exception as e:
- # Don't log trace errors to avoid recursion
- pass
-
- def clearTraceLog(self) -> None:
- """Clear the trace log file"""
- try:
- import logging
- import os
-
- # Get log directory from configuration
- logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
- if not os.path.isabs(logDir):
- # If relative path, make it relative to the gateway directory
- gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
- logDir = os.path.join(gatewayDir, logDir)
-
- # Create trace file path
- trace_file = os.path.join(logDir, "log_trace.log")
-
- # Only clear if logger is in debug mode
- if logger.level > logging.DEBUG:
- # Delete file if not in debug mode
- if os.path.exists(trace_file):
- os.remove(trace_file)
- return
-
- # Create empty file if in debug mode
- with open(trace_file, "w", encoding="utf-8") as f:
- f.write("")
-
- except Exception as e:
- # Don't log trace errors to avoid recursion
- pass
diff --git a/modules/workflows/processing/modes/__init__.py b/modules/workflows/processing/modes/__init__.py
new file mode 100644
index 00000000..084eda51
--- /dev/null
+++ b/modules/workflows/processing/modes/__init__.py
@@ -0,0 +1 @@
+# Workflow mode implementations
diff --git a/modules/workflows/processing/modes/actionplanMode.py b/modules/workflows/processing/modes/actionplanMode.py
new file mode 100644
index 00000000..82165a9a
--- /dev/null
+++ b/modules/workflows/processing/modes/actionplanMode.py
@@ -0,0 +1,831 @@
+# actionplanMode.py
+# Actionplan mode implementation for workflows
+
+import json
+import logging
+import uuid
+from typing import List, Dict, Any
+from modules.datamodels.datamodelWorkflow import (
+ TaskStep, TaskContext, TaskResult, TaskAction, TaskStatus,
+ ActionResult, ReviewResult, ReviewContext
+)
+from modules.datamodels.datamodelChat import ChatWorkflow
+from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
+from modules.workflows.processing.modes.baseMode import BaseMode
+from modules.workflows.processing.shared.executionState import TaskExecutionState
+from modules.workflows.processing.shared.promptFactoryPlaceholders import (
+ createActionDefinitionPromptTemplate,
+ createResultReviewPromptTemplate,
+ extractUserPrompt,
+ extractAvailableDocuments,
+ extractWorkflowHistory,
+ extractAvailableMethods,
+ extractUserLanguage,
+ extractReviewContent
+)
+
+logger = logging.getLogger(__name__)
+
+class ActionplanMode(BaseMode):
+ """Actionplan mode implementation - batch planning and sequential execution"""
+
+ def __init__(self, services, workflow):
+ super().__init__(services, workflow)
+
    async def generateTaskActions(self, taskStep: TaskStep, workflow: ChatWorkflow,
                                  previousResults: List = None, enhancedContext: TaskContext = None) -> List[TaskAction]:
        """Generate actions for a given task step using batch planning approach.

        Builds (or clones) a TaskContext, renders the action-definition prompt
        template with placeholders, asks the AI service for a JSON action plan,
        validates it, and persists each plan entry via _createTaskAction.

        Args:
            taskStep: Task step whose objective the actions must fulfil.
            workflow: Owning chat workflow (used for stop checks and its id).
            previousResults: Results of earlier tasks, folded into the context.
            enhancedContext: Optional pre-built TaskContext carrying retry state
                (retry_count, improvements, criteria_progress, ...).

        Returns:
            List of persisted TaskAction objects; [] on any failure — every
            exception is caught, logged, and mapped to an empty list.
        """
        try:
            # Check workflow status before generating actions
            self._checkWorkflowStopped(workflow)

            retryInfo = f" (Retry #{enhancedContext.retry_count})" if enhancedContext and enhancedContext.retry_count > 0 else ""
            logger.info(f"Generating actions for task: {taskStep.objective}{retryInfo}")

            # Log criteria progress if this is a retry
            if enhancedContext and hasattr(enhancedContext, 'criteria_progress') and enhancedContext.criteria_progress is not None:
                progress = enhancedContext.criteria_progress
                logger.info(f"Retry attempt {enhancedContext.retry_count} - Criteria progress:")
                if progress.get('met_criteria'):
                    logger.info(f" Met criteria: {', '.join(progress['met_criteria'])}")
                if progress.get('unmet_criteria'):
                    logger.warning(f" Unmet criteria: {', '.join(progress['unmet_criteria'])}")

                # Show improvement trends by comparing the quality scores of the
                # last two recorded attempts.
                if progress.get('attempt_history'):
                    recentAttempts = progress['attempt_history'][-2:]  # Last 2 attempts
                    if len(recentAttempts) >= 2:
                        prevScore = recentAttempts[0].get('quality_score', 0)
                        currScore = recentAttempts[1].get('quality_score', 0)
                        if currScore > prevScore:
                            logger.info(f" Quality improving: {prevScore} -> {currScore}")
                        elif currScore < prevScore:
                            logger.warning(f" Quality declining: {prevScore} -> {currScore}")
                        else:
                            logger.info(f" Quality stable: {currScore}")

            # Enhanced retry context logging
            if enhancedContext and enhancedContext.retry_count > 0:
                logger.info("=== RETRY CONTEXT FOR ACTION GENERATION ===")
                logger.info(f"Retry Count: {enhancedContext.retry_count}")
                logger.debug(f"Previous Improvements: {enhancedContext.improvements}")
                logger.debug(f"Previous Review Result: {enhancedContext.previous_review_result}")
                logger.debug(f"Failure Patterns: {enhancedContext.failure_patterns}")
                logger.debug(f"Failed Actions: {enhancedContext.failed_actions}")
                logger.debug(f"Successful Actions: {enhancedContext.successful_actions}")
                logger.info("=== END RETRY CONTEXT ===")

            # Log that we're starting action generation
            logger.info("=== STARTING ACTION GENERATION ===")

            # Create proper context object for action definition.  An incoming
            # TaskContext is copied field-by-field (normalising None collections
            # to empty ones); otherwise a fresh context is built from scratch.
            if enhancedContext and isinstance(enhancedContext, TaskContext):
                # Use existing TaskContext if provided
                actionContext = TaskContext(
                    task_step=enhancedContext.task_step,
                    workflow=enhancedContext.workflow,
                    workflow_id=enhancedContext.workflow_id,
                    available_documents=enhancedContext.available_documents,
                    available_connections=enhancedContext.available_connections,
                    previous_results=enhancedContext.previous_results or previousResults or [],
                    previous_handover=enhancedContext.previous_handover,
                    improvements=enhancedContext.improvements or [],
                    retry_count=enhancedContext.retry_count or 0,
                    previous_action_results=enhancedContext.previous_action_results or [],
                    previous_review_result=enhancedContext.previous_review_result,
                    is_regeneration=enhancedContext.is_regeneration or False,
                    failure_patterns=enhancedContext.failure_patterns or [],
                    failed_actions=enhancedContext.failed_actions or [],
                    successful_actions=enhancedContext.successful_actions or [],
                    criteria_progress=enhancedContext.criteria_progress
                )
            else:
                # Create new context from scratch
                actionContext = TaskContext(
                    task_step=taskStep,
                    workflow=workflow,
                    workflow_id=workflow.id,
                    available_documents=None,
                    available_connections=None,
                    previous_results=previousResults or [],
                    previous_handover=None,
                    improvements=[],
                    retry_count=0,
                    previous_action_results=[],
                    previous_review_result=None,
                    is_regeneration=False,
                    failure_patterns=[],
                    failed_actions=[],
                    successful_actions=[],
                    criteria_progress=None
                )

            # Check workflow status before calling AI service
            self._checkWorkflowStopped(workflow)

            # Generate the action definition prompt with placeholders
            actionPromptTemplate = createActionDefinitionPromptTemplate()

            # Extract content for placeholders
            userPrompt = extractUserPrompt(actionContext)
            availableDocuments = extractAvailableDocuments(actionContext)
            workflowHistory = extractWorkflowHistory(self.services, actionContext)
            availableMethods = extractAvailableMethods(self.services)
            userLanguage = extractUserLanguage(self.services)

            # Create placeholders dictionary
            placeholders = {
                "USER_PROMPT": userPrompt,
                "AVAILABLE_DOCUMENTS": availableDocuments,
                "WORKFLOW_HISTORY": workflowHistory,
                "AVAILABLE_METHODS": availableMethods,
                "USER_LANGUAGE": userLanguage
            }

            # Trace action planning prompt
            self._writeTraceLog("Action Plan Prompt", actionPromptTemplate)
            self._writeTraceLog("Action Plan Placeholders", placeholders)

            # Centralized AI call: Action planning (quality, detailed) with placeholders
            options = AiCallOptions(
                operationType=OperationType.GENERATE_PLAN,
                priority=Priority.QUALITY,
                compressPrompt=False,
                compressContext=False,
                processingMode=ProcessingMode.DETAILED,
                maxCost=0.10,
                maxProcessingTime=30
            )

            # NOTE(review): despite the name, `prompt` holds the AI *response*.
            prompt = await self.services.ai.callAi(
                prompt=actionPromptTemplate,
                placeholders=placeholders,
                options=options
            )

            # Check if AI response is valid
            if not prompt:
                raise ValueError("AI service returned no response")

            # Log action response received
            logger.info("=== ACTION PLAN AI RESPONSE RECEIVED ===")
            logger.info(f"Response length: {len(prompt) if prompt else 0}")
            # Trace action planning response
            self._writeTraceLog("Action Plan Response", prompt)

            # Parse action response: crude extraction of the outermost {...}
            # span, tolerating surrounding prose the model may have emitted.
            jsonStart = prompt.find('{')
            jsonEnd = prompt.rfind('}') + 1
            if jsonStart == -1 or jsonEnd == 0:
                raise ValueError("No JSON found in response")
            jsonStr = prompt[jsonStart:jsonEnd]

            try:
                actionData = json.loads(jsonStr)
            except Exception as e:
                # A parse failure degrades to {} and is then reported as a
                # missing 'actions' field below.
                logger.error(f"Error parsing action response JSON: {str(e)}")
                actionData = {}

            if 'actions' not in actionData:
                raise ValueError("Action response missing 'actions' field")

            actions = actionData['actions']
            if not actions:
                raise ValueError("Action response contains empty actions list")

            if not isinstance(actions, list):
                raise ValueError(f"Action response 'actions' field is not a list: {type(actions)}")

            if not self.validator.validateAction(actions, actionContext):
                logger.error("Generated actions failed validation")
                raise Exception("AI-generated actions failed validation - AI is required for action generation")

            # Convert to TaskAction objects (each one is persisted to the DB)
            taskActions = []
            for i, a in enumerate(actions):
                if not isinstance(a, dict):
                    logger.warning(f"Skipping invalid action {i+1}: not a dictionary")
                    continue


                taskAction = self._createTaskAction({
                    "execMethod": a.get('method', 'unknown'),
                    "execAction": a.get('action', 'unknown'),
                    "execParameters": a.get('parameters', {}),
                    "execResultLabel": a.get('resultLabel', ''),
                    "expectedDocumentFormats": a.get('expectedDocumentFormats', None),
                    "status": TaskStatus.PENDING,
                    # Extract user-friendly message if available
                    "userMessage": a.get('userMessage', None)
                })

                if taskAction:
                    taskActions.append(taskAction)
                else:
                    logger.warning(f"Skipping invalid action {i+1}: failed to create TaskAction")

            # NOTE(review): taskActions already contains only truthy entries,
            # so this filter is redundant but harmless.
            validActions = [ta for ta in taskActions if ta]

            if not validActions:
                raise ValueError("No valid actions could be created from AI response")

            return validActions
        except Exception as e:
            logger.error(f"Error in generateTaskActions: {str(e)}")
            return []
+
+ async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext,
+ taskIndex: int = None, totalTasks: int = None) -> TaskResult:
+ """Execute all actions for a task step using Actionplan mode"""
+ logger.info(f"=== STARTING TASK {taskIndex or '?'}: {taskStep.objective} ===")
+
+ # Update workflow object before executing task
+ if taskIndex is not None:
+ self._updateWorkflowBeforeExecutingTask(taskIndex)
+
+ # Update workflow context for this task
+ if taskIndex is not None:
+ self.services.workflow.setWorkflowContext(task_number=taskIndex)
+
+ # Create task start message
+ await self.messageCreator.createTaskStartMessage(taskStep, workflow, taskIndex, totalTasks)
+
+ state = TaskExecutionState(taskStep)
+ retryContext = context
+ maxRetries = state.max_retries
+
+ for attempt in range(maxRetries):
+ logger.info(f"Task execution attempt {attempt+1}/{maxRetries}")
+
+ # Check workflow status before starting task execution
+ self._checkWorkflowStopped(workflow)
+
+ # Update retry context with current attempt information
+ if retryContext:
+ retryContext.retry_count = attempt + 1
+
+ actions = await self.generateTaskActions(taskStep, workflow,
+ previousResults=retryContext.previous_results,
+ enhancedContext=retryContext)
+
+ # Log total actions count for this task
+ totalActions = len(actions) if actions else 0
+ logger.info(f"Task {taskIndex or '?'} has {totalActions} actions")
+
+ # Update workflow object after action planning
+ self._updateWorkflowAfterActionPlanning(totalActions)
+ self._setWorkflowTotals(totalActions=totalActions)
+
+ if not actions:
+ logger.error("No actions defined for task step, aborting task execution")
+ break
+
+ actionResults = []
+ for actionIdx, action in enumerate(actions):
+ # Check workflow status before each action execution
+ self._checkWorkflowStopped(workflow)
+
+ # Update workflow object before executing action
+ actionNumber = actionIdx + 1
+ self._updateWorkflowBeforeExecutingAction(actionNumber)
+
+ # Update workflow context for this action
+ self.services.workflow.setWorkflowContext(action_number=actionNumber)
+
+ # Log action start
+ logger.info(f"Task {taskIndex} - Starting action {actionNumber}/{totalActions}")
+
+ # Create action start message
+ actionStartMessage = {
+ "workflowId": workflow.id,
+ "role": "assistant",
+ "message": f"β‘ **Action {actionNumber}/{totalActions}** (Method {action.execMethod}.{action.execAction})",
+ "status": "step",
+ "sequenceNr": len(workflow.messages) + 1,
+ "publishedAt": self.services.utils.getUtcTimestamp(),
+ "documentsLabel": f"action_{actionNumber}_start",
+ "documents": [],
+ "actionProgress": "running",
+ "roundNumber": workflow.currentRound,
+ "taskNumber": taskIndex,
+ "actionNumber": actionNumber
+ }
+
+ # Add user-friendly message if available
+ if action.userMessage:
+ actionStartMessage["message"] += f"\n\n㪠{action.userMessage}"
+
+ message = self.services.interfaceDbChat.createMessage(actionStartMessage)
+ if message:
+ workflow.messages.append(message)
+ logger.info(f"Action start message created for action {actionNumber}")
+
+ # Execute single action
+ result = await self.actionExecutor.executeSingleAction(action, workflow, taskStep,
+ taskIndex, actionNumber, totalActions)
+ actionResults.append(result)
+
+ if result.success:
+ state.addSuccessfulAction(result)
+ else:
+ state.addFailedAction(result)
+
+ # Check workflow status before review
+ self._checkWorkflowStopped(workflow)
+
+ reviewResult = await self._reviewTaskCompletion(taskStep, actions, actionResults, workflow)
+ success = reviewResult.status == 'success'
+ feedback = reviewResult.reason
+ error = None if success else reviewResult.reason
+
+ if success:
+ logger.info(f"=== TASK {taskIndex or '?'} COMPLETED SUCCESSFULLY: {taskStep.objective} ===")
+
+ # Create task completion message
+ await self.messageCreator.createTaskCompletionMessage(taskStep, workflow, taskIndex, totalTasks, reviewResult)
+
+ return TaskResult(
+ taskId=taskStep.id,
+ status=TaskStatus.COMPLETED,
+ success=True,
+ feedback=feedback,
+ error=None
+ )
+
+ elif reviewResult.status == 'retry' and state.canRetry():
+ logger.warning(f"Task step '{taskStep.objective}' requires retry: {reviewResult.improvements}")
+
+ # Enhanced logging of criteria status
+ if reviewResult.met_criteria:
+ logger.info(f"Met criteria: {', '.join(reviewResult.met_criteria)}")
+ if reviewResult.unmet_criteria:
+ logger.warning(f"Unmet criteria: {', '.join(reviewResult.unmet_criteria)}")
+
+ state.incrementRetryCount()
+
+ # Update retry context with retry information and criteria tracking
+ if retryContext:
+ retryContext.retry_count = state.retry_count
+ retryContext.improvements = reviewResult.improvements
+ retryContext.previous_action_results = actionResults
+ retryContext.previous_review_result = reviewResult
+ retryContext.is_regeneration = True
+ retryContext.failure_patterns = state.getFailurePatterns()
+ retryContext.failed_actions = state.failed_actions
+ retryContext.successful_actions = state.successful_actions
+
+ # Track criteria progress across retries
+ if not hasattr(retryContext, 'criteria_progress'):
+ retryContext.criteria_progress = {
+ 'met_criteria': set(),
+ 'unmet_criteria': set(),
+ 'attempt_history': []
+ }
+
+ # Update criteria progress
+ if reviewResult.met_criteria:
+ retryContext.criteria_progress['met_criteria'].update(reviewResult.met_criteria)
+ if reviewResult.unmet_criteria:
+ retryContext.criteria_progress['unmet_criteria'].update(reviewResult.unmet_criteria)
+
+ # Record this attempt's criteria status
+ attemptRecord = {
+ 'attempt': state.retry_count,
+ 'met_criteria': reviewResult.met_criteria or [],
+ 'unmet_criteria': reviewResult.unmet_criteria or [],
+ 'quality_score': reviewResult.quality_score,
+ 'improvements': reviewResult.improvements or []
+ }
+ retryContext.criteria_progress['attempt_history'].append(attemptRecord)
+
+ # Create retry message
+ await self.messageCreator.createRetryMessage(taskStep, workflow, taskIndex, reviewResult)
+
+ continue
+ else:
+ logger.error(f"=== TASK {taskIndex or '?'} FAILED: {taskStep.objective} after {attempt+1} attempts ===")
+
+ # Create error message
+ await self.messageCreator.createErrorMessage(taskStep, workflow, taskIndex, reviewResult.reason)
+
+ return TaskResult(
+ taskId=taskStep.id,
+ status=TaskStatus.FAILED,
+ success=False,
+ feedback=feedback,
+ error=reviewResult.reason if reviewResult and hasattr(reviewResult, 'reason') else "Task failed after retry attempts"
+ )
+
+ logger.error(f"=== TASK {taskIndex or '?'} FAILED AFTER ALL RETRIES: {taskStep.objective} ===")
+
+ # Create final error message
+ await self.messageCreator.createErrorMessage(taskStep, workflow, taskIndex, "Task failed after all retries")
+
+ return TaskResult(
+ taskId=taskStep.id,
+ status=TaskStatus.FAILED,
+ success=False,
+ feedback="Task failed after all retries.",
+ error="Task failed after all retries."
+ )
+
    async def _reviewTaskCompletion(self, taskStep: TaskStep, taskActions: List[TaskAction],
                                    actionResults: List[ActionResult], workflow: ChatWorkflow) -> ReviewResult:
        """Review task completion and determine success/failure/retry.

        Summarizes the executed actions into a ReviewContext, asks the AI
        reviewer for a JSON verdict, and normalizes the answer into a
        ReviewResult.  Any exception yields a 'failed' ReviewResult instead of
        propagating.
        """
        try:
            # Check workflow status before reviewing task completion
            self._checkWorkflowStopped(workflow)

            logger.info(f"=== STARTING TASK COMPLETION REVIEW ===")
            logger.info(f"Task: {taskStep.objective}")
            logger.info(f"Actions executed: {len(taskActions) if taskActions else 0}")
            logger.info(f"Action results: {len(actionResults) if actionResults else 0}")

            # Create proper context object for result review; step_result is a
            # compact summary of what actually happened (counts, texts, errors,
            # documents per action).
            reviewContext = ReviewContext(
                task_step=taskStep,
                task_actions=taskActions,
                action_results=actionResults,
                step_result={
                    'successful_actions': sum(1 for result in actionResults if result.success),
                    'total_actions': len(actionResults),
                    'results': [self._extractResultText(result) for result in actionResults if result.success],
                    'errors': [result.error for result in actionResults if not result.success],
                    'documents': [
                        {
                            'action_index': i,
                            'documents_count': len(result.documents) if result.documents else 0,
                            'documents': result.documents if result.documents else []
                        }
                        for i, result in enumerate(actionResults)
                    ]
                },
                workflow_id=workflow.id,
                previous_results=[]
            )

            # Check workflow status before calling AI service
            self._checkWorkflowStopped(workflow)

            # Use placeholder-based review prompt
            promptTemplate = createResultReviewPromptTemplate()

            # Extract content for placeholders
            userPrompt = extractUserPrompt(reviewContext)
            reviewContent = extractReviewContent(reviewContext)

            # Create placeholders dictionary
            placeholders = {
                "USER_PROMPT": userPrompt,
                "REVIEW_CONTENT": reviewContent
            }

            # Log result review prompt sent to AI
            logger.info("=== RESULT REVIEW PROMPT SENT TO AI ===")
            logger.info(f"Task: {taskStep.objective}")
            logger.info(f"Action Results Count: {len(reviewContext.action_results) if reviewContext.action_results else 0}")
            logger.info(f"Task Actions Count: {len(reviewContext.task_actions) if reviewContext.task_actions else 0}")
            # Trace result review prompt
            self._writeTraceLog("Result Review Prompt", promptTemplate)
            self._writeTraceLog("Result Review Placeholders", placeholders)

            # Centralized AI call: Result validation (balanced analysis) with placeholders
            options = AiCallOptions(
                operationType=OperationType.ANALYSE_CONTENT,
                priority=Priority.BALANCED,
                compressPrompt=True,
                compressContext=False,
                processingMode=ProcessingMode.ADVANCED,
                maxCost=0.05,
                maxProcessingTime=30
            )

            response = await self.services.ai.callAi(
                prompt=promptTemplate,
                placeholders=placeholders,
                options=options
            )

            # Log result review response received
            logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")
            logger.info(f"Response length: {len(response) if response else 0}")
            # Trace result review response
            self._writeTraceLog("Result Review Response", response)

            # Parse review response: extract the outermost {...} span
            jsonStart = response.find('{')
            jsonEnd = response.rfind('}') + 1
            if jsonStart == -1 or jsonEnd == 0:
                raise ValueError("No JSON found in review response")
            jsonStr = response[jsonStart:jsonEnd]

            try:
                review = json.loads(jsonStr)
            except Exception as e:
                # Parse failure degrades to {} and is reported as a missing
                # 'status' field just below.
                logger.error(f"Error parsing review response JSON: {str(e)}")
                review = {}
            if 'status' not in review:
                raise ValueError("Review response missing 'status' field")
            # NOTE(review): these setdefault calls are effectively dead —
            # 'status' is guaranteed present by the check above, and the
            # .get() defaults used when building ReviewResult below supply the
            # same fallbacks for 'reason' and 'quality_score'.
            review.setdefault('status', 'unknown')
            review.setdefault('reason', 'No reason provided')
            review.setdefault('quality_score', 5)

            # Ensure improvements is a list
            improvements = review.get('improvements', [])
            if isinstance(improvements, str):
                # Split string into list if it's a single improvement
                improvements = [improvements.strip()] if improvements.strip() else []
            elif not isinstance(improvements, list):
                improvements = []

            # Ensure all list fields are properly typed
            metCriteria = review.get('met_criteria', [])
            if not isinstance(metCriteria, list):
                metCriteria = []

            unmetCriteria = review.get('unmet_criteria', [])
            if not isinstance(unmetCriteria, list):
                unmetCriteria = []

            reviewResult = ReviewResult(
                status=review.get('status', 'unknown'),
                reason=review.get('reason', 'No reason provided'),
                improvements=improvements,
                quality_score=review.get('quality_score', 5),
                missing_outputs=[],
                met_criteria=metCriteria,
                unmet_criteria=unmetCriteria,
                confidence=review.get('confidence', 0.5),
                # Extract user-friendly message if available
                userMessage=review.get('userMessage', None)
            )

            # Enhanced validation logging
            logger.info(f"VALIDATION RESULT - Task: '{taskStep.objective}' - Status: {reviewResult.status.upper()}, Quality: {reviewResult.quality_score}/10")
            if reviewResult.status == 'success':
                logger.info(f"VALIDATION SUCCESS - Task completed successfully")
                if reviewResult.met_criteria:
                    logger.info(f"Met criteria: {', '.join(reviewResult.met_criteria)}")
            elif reviewResult.status == 'retry':
                logger.warning(f"VALIDATION RETRY - Task requires retry: {reviewResult.improvements}")
                if reviewResult.unmet_criteria:
                    logger.warning(f"Unmet criteria: {', '.join(reviewResult.unmet_criteria)}")
            else:
                logger.error(f"VALIDATION FAILED - Task failed: {reviewResult.reason}")

            logger.info(f"=== TASK COMPLETION REVIEW FINISHED ===")
            logger.info(f"Final Status: {reviewResult.status}")
            logger.info(f"Quality Score: {reviewResult.quality_score}/10")
            logger.info(f"Improvements: {reviewResult.improvements}")
            logger.info("=== END REVIEW ===")

            return reviewResult
        except Exception as e:
            logger.error(f"Error in reviewTaskCompletion: {str(e)}")
            return ReviewResult(
                status='failed',
                reason=str(e),
                quality_score=0
            )
+
    def _createTaskAction(self, actionData: Dict[str, Any]) -> TaskAction:
        """Creates a new task action.

        NOTE(review): this definition is DEAD CODE — a near-identical
        `_createTaskAction` is redefined later in this class, and in Python the
        later binding silently replaces this one.  One of the two copies should
        be deleted.

        Validates/normalizes the incoming dict, persists it through the chat DB
        interface, and returns the stored record as a TaskAction model (or None
        on any validation/persistence error).
        """
        try:
            # Ensure ID is present
            if "id" not in actionData or not actionData["id"]:
                actionData["id"] = f"action_{uuid.uuid4()}"

            # Ensure required fields
            if "status" not in actionData:
                actionData["status"] = TaskStatus.PENDING

            if "execMethod" not in actionData:
                logger.error("execMethod is required for task action")
                return None

            if "execAction" not in actionData:
                logger.error("execAction is required for task action")
                return None

            if "execParameters" not in actionData:
                actionData["execParameters"] = {}

            # Use generic field separation based on TaskAction model
            simpleFields, objectFields = self.services.interfaceDbChat._separate_object_fields(TaskAction, actionData)

            # Create action in database
            createdAction = self.services.interfaceDbChat.db.recordCreate(TaskAction, simpleFields)

            # Convert to TaskAction model
            return TaskAction(
                id=createdAction["id"],
                execMethod=createdAction["execMethod"],
                execAction=createdAction["execAction"],
                execParameters=createdAction.get("execParameters", {}),
                execResultLabel=createdAction.get("execResultLabel"),
                expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
                status=createdAction.get("status", TaskStatus.PENDING),
                error=createdAction.get("error"),
                retryCount=createdAction.get("retryCount", 0),
                retryMax=createdAction.get("retryMax", 3),
                processingTime=createdAction.get("processingTime"),
                timestamp=float(createdAction.get("timestamp", self.services.utils.getUtcTimestamp())),
                result=createdAction.get("result"),
                resultDocuments=createdAction.get("resultDocuments", []),
                userMessage=createdAction.get("userMessage")
            )

        except Exception as e:
            logger.error(f"Error creating task action: {str(e)}")
            return None
+
+ def _extractResultText(self, result: ActionResult) -> str:
+ """Extract result text from ActionResult documents"""
+ if not result.success or not result.documents:
+ return ""
+
+ # Extract text directly from ActionDocument objects
+ resultParts = []
+ for doc in result.documents:
+ if hasattr(doc, 'documentData') and doc.documentData:
+ resultParts.append(str(doc.documentData))
+
+ # Join all document results with separators
+ return "\n\n---\n\n".join(resultParts) if resultParts else ""
+
+ def _updateWorkflowBeforeExecutingTask(self, taskNumber: int):
+ """Update workflow object before executing a task"""
+ try:
+ updateData = {
+ "currentTask": taskNumber,
+ "currentAction": 0,
+ "totalActions": 0
+ }
+
+ # Update workflow object
+ self.workflow.currentTask = taskNumber
+ self.workflow.currentAction = 0
+ self.workflow.totalActions = 0
+
+ # Update in database
+ self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+ logger.info(f"Updated workflow {self.workflow.id} before executing task {taskNumber}: {updateData}")
+
+ except Exception as e:
+ logger.error(f"Error updating workflow before executing task: {str(e)}")
+
+ def _updateWorkflowAfterActionPlanning(self, totalActions: int):
+ """Update workflow object after action planning for current task"""
+ try:
+ updateData = {
+ "totalActions": totalActions
+ }
+
+ # Update workflow object
+ self.workflow.totalActions = totalActions
+
+ # Update in database
+ self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+ logger.info(f"Updated workflow {self.workflow.id} after action planning: {updateData}")
+
+ except Exception as e:
+ logger.error(f"Error updating workflow after action planning: {str(e)}")
+
+ def _updateWorkflowBeforeExecutingAction(self, actionNumber: int):
+ """Update workflow object before executing an action"""
+ try:
+ updateData = {
+ "currentAction": actionNumber
+ }
+
+ # Update workflow object
+ self.workflow.currentAction = actionNumber
+
+ # Update in database
+ self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+ logger.info(f"Updated workflow {self.workflow.id} before executing action {actionNumber}: {updateData}")
+
+ except Exception as e:
+ logger.error(f"Error updating workflow before executing action: {str(e)}")
+
+ def _setWorkflowTotals(self, totalTasks: int = None, totalActions: int = None):
+ """Set total counts for workflow progress tracking and update database"""
+ try:
+ updateData = {}
+
+ if totalTasks is not None:
+ self.workflow.totalTasks = totalTasks
+ updateData["totalTasks"] = totalTasks
+
+ if totalActions is not None:
+ self.workflow.totalActions = totalActions
+ updateData["totalActions"] = totalActions
+
+ # Update workflow object in database if we have changes
+ if updateData:
+ self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+ logger.info(f"Updated workflow {self.workflow.id} totals in database: {updateData}")
+
+ logger.debug(f"Updated workflow totals: Tasks {self.workflow.totalTasks if hasattr(self.workflow, 'totalTasks') else 'N/A'}, Actions {self.workflow.totalActions if hasattr(self.workflow, 'totalActions') else 'N/A'}")
+ except Exception as e:
+ logger.error(f"Error setting workflow totals: {str(e)}")
+
+ def _createTaskAction(self, actionData: Dict[str, Any]) -> TaskAction:
+ """Creates a new task action"""
+ try:
+ import uuid
+
+ # Ensure ID is present
+ if "id" not in actionData or not actionData["id"]:
+ actionData["id"] = f"action_{uuid.uuid4()}"
+
+ # Ensure required fields
+ if "status" not in actionData:
+ actionData["status"] = TaskStatus.PENDING
+
+ if "execMethod" not in actionData:
+ logger.error("execMethod is required for task action")
+ return None
+
+ if "execAction" not in actionData:
+ logger.error("execAction is required for task action")
+ return None
+
+ if "execParameters" not in actionData:
+ actionData["execParameters"] = {}
+
+ # Use generic field separation based on TaskAction model
+ simpleFields, objectFields = self.services.interfaceDbChat._separate_object_fields(TaskAction, actionData)
+
+ # Create action in database
+ createdAction = self.services.interfaceDbChat.db.recordCreate(TaskAction, simpleFields)
+
+ # Convert to TaskAction model
+ return TaskAction(
+ id=createdAction["id"],
+ execMethod=createdAction["execMethod"],
+ execAction=createdAction["execAction"],
+ execParameters=createdAction.get("execParameters", {}),
+ execResultLabel=createdAction.get("execResultLabel"),
+ expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
+ status=createdAction.get("status", TaskStatus.PENDING),
+ error=createdAction.get("error"),
+ retryCount=createdAction.get("retryCount", 0),
+ retryMax=createdAction.get("retryMax", 3),
+ processingTime=createdAction.get("processingTime"),
+ timestamp=float(createdAction.get("timestamp", self.services.utils.getUtcTimestamp())),
+ result=createdAction.get("result"),
+ resultDocuments=createdAction.get("resultDocuments", []),
+ userMessage=createdAction.get("userMessage")
+ )
+
+ except Exception as e:
+ logger.error(f"Error creating task action: {str(e)}")
+ return None
+
+ def _writeTraceLog(self, contextText: str, data: Any) -> None:
+ """Write trace data to configured trace file if in debug mode"""
+ try:
+ import os
+ import json
+ from datetime import datetime, UTC
+
+ # Only write if logger is in debug mode
+ if logger.level > logging.DEBUG:
+ return
+
+ # Get log directory from configuration
+ logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
+ if not os.path.isabs(logDir):
+ # If relative path, make it relative to the gateway directory
+ gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+ logDir = os.path.join(gatewayDir, logDir)
+
+ # Ensure log directory exists
+ os.makedirs(logDir, exist_ok=True)
+
+ # Create trace file path
+ traceFile = os.path.join(logDir, "log_trace.log")
+
+ # Format the trace entry
+ timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
+ traceEntry = f"[{timestamp}] {contextText}\n"
+
+ # Add data if provided - show full content without truncation
+ if data is not None:
+ if isinstance(data, (dict, list)):
+ # Use ensure_ascii=False to preserve Unicode characters and indent=2 for readability
+ traceEntry += f"Data: {json.dumps(data, indent=2, default=str, ensure_ascii=False)}\n"
+ else:
+ # For string data, show full content without truncation
+ traceEntry += f"Data: {str(data)}\n"
+
+ traceEntry += "-" * 80 + "\n\n"
+
+ # Write to trace file
+ with open(traceFile, "a", encoding="utf-8") as f:
+ f.write(traceEntry)
+
+ except Exception as e:
+ # Don't log trace errors to avoid recursion
+ pass
diff --git a/modules/workflows/processing/modes/baseMode.py b/modules/workflows/processing/modes/baseMode.py
new file mode 100644
index 00000000..173bd98b
--- /dev/null
+++ b/modules/workflows/processing/modes/baseMode.py
@@ -0,0 +1,60 @@
+# baseMode.py
+# Abstract base class for workflow modes
+
+from abc import ABC, abstractmethod
+import logging
+from typing import List, Dict, Any
+from modules.datamodels.datamodelWorkflow import TaskStep, TaskContext, TaskResult, TaskAction
+from modules.datamodels.datamodelChat import ChatWorkflow
+from modules.workflows.processing.core.taskPlanner import TaskPlanner
+from modules.workflows.processing.core.actionExecutor import ActionExecutor
+from modules.workflows.processing.core.messageCreator import MessageCreator
+from modules.workflows.processing.core.validator import WorkflowValidator
+
+logger = logging.getLogger(__name__)
+
class BaseMode(ABC):
    """Abstract base class for workflow execution modes.

    Concrete modes implement ``executeTask`` and ``generateTaskActions``;
    planning and task-plan messaging are shared and delegated to the common
    core components created here.
    """

    def __init__(self, services, workflow):
        self.services = services
        self.workflow = workflow
        # Shared core components used by every mode.
        self.taskPlanner = TaskPlanner(services)
        self.actionExecutor = ActionExecutor(services)
        self.messageCreator = MessageCreator(services)
        self.validator = WorkflowValidator(services)

    def _checkWorkflowStopped(self, workflow):
        """Check if workflow has been stopped by user and raise exception if so.

        Prefers the freshest status from the database; if that lookup fails,
        falls back to the in-memory workflow object. Raises a plain
        ``Exception`` (callers catch ``Exception``) when stopped.

        Bug fix: previously the stop-exception was raised *inside* the same
        ``try`` whose broad ``except Exception`` caught it, so a DB-reported
        stop was swallowed whenever the in-memory object still said running.
        The status check and the raise are now separated.
        """
        stopped = False
        fromMemory = False
        try:
            # Get the current workflow status from the database to avoid stale data
            current_workflow = self.services.interfaceDbChat.getWorkflow(workflow.id)
            stopped = bool(current_workflow and current_workflow.status == "stopped")
        except Exception as e:
            # If we can't get the current status due to database issues, fall back to the in-memory object
            logger.warning(f"Could not check current workflow status from database: {str(e)}")
            stopped = bool(workflow and workflow.status == "stopped")
            fromMemory = True
        if stopped:
            if fromMemory:
                logger.info("Workflow stopped by user (from in-memory object), aborting execution")
            else:
                logger.info("Workflow stopped by user, aborting execution")
            raise Exception("Workflow was stopped by user")

    @abstractmethod
    async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext,
                         taskIndex: int = None, totalTasks: int = None) -> TaskResult:
        """Execute a task step - must be implemented by concrete modes"""
        pass

    @abstractmethod
    async def generateTaskActions(self, taskStep: TaskStep, workflow: ChatWorkflow,
                                 previousResults: List = None, enhancedContext: TaskContext = None) -> List[TaskAction]:
        """Generate actions for a task step - must be implemented by concrete modes"""
        pass

    async def generateTaskPlan(self, userInput: str, workflow: ChatWorkflow):
        """Generate task plan - common to all modes (delegates to TaskPlanner)."""
        return await self.taskPlanner.generateTaskPlan(userInput, workflow)

    async def createTaskPlanMessage(self, taskPlan, workflow: ChatWorkflow):
        """Create task plan message - common to all modes (delegates to MessageCreator)."""
        return await self.messageCreator.createTaskPlanMessage(taskPlan, workflow)
diff --git a/modules/workflows/processing/modes/reactMode.py b/modules/workflows/processing/modes/reactMode.py
new file mode 100644
index 00000000..a026523e
--- /dev/null
+++ b/modules/workflows/processing/modes/reactMode.py
@@ -0,0 +1,907 @@
+# reactMode.py
+# React mode implementation for workflows
+
+import json
+import logging
+import re
+import time
+from datetime import datetime, timezone
+from typing import List, Dict, Any
+from modules.datamodels.datamodelWorkflow import (
+ TaskStep, TaskContext, TaskResult, TaskAction, TaskStatus,
+ ActionResult
+)
+from modules.datamodels.datamodelChat import ChatWorkflow
+from modules.datamodels.datamodelAi import AiCallOptions, OperationType, ProcessingMode, Priority
+from modules.workflows.processing.modes.baseMode import BaseMode
+from modules.workflows.processing.shared.executionState import TaskExecutionState, should_continue
+from modules.workflows.processing.shared.promptFactoryPlaceholders import (
+ createActionSelectionPromptTemplate,
+ createActionParameterPromptTemplate,
+ createRefinementPromptTemplate,
+ extractUserPrompt,
+ extractAvailableDocuments,
+ extractUserLanguage,
+ extractAvailableMethods,
+ extractReviewContent
+)
+from modules.workflows.processing.shared.promptFactory import getConnectionReferenceList
+from modules.workflows.processing.adaptive import IntentAnalyzer, ContentValidator, LearningEngine, ProgressTracker
+
+logger = logging.getLogger(__name__)
+
+class ReactMode(BaseMode):
+ """React mode implementation - iterative plan-act-observe-refine loop"""
+
+ def __init__(self, services, workflow):
+ super().__init__(services, workflow)
+ # Initialize adaptive components
+ self.intentAnalyzer = IntentAnalyzer()
+ self.contentValidator = ContentValidator()
+ self.learningEngine = LearningEngine()
+ self.progressTracker = ProgressTracker()
+ self.currentIntent = None
+
+ async def generateTaskActions(self, taskStep: TaskStep, workflow: ChatWorkflow,
+ previousResults: List = None, enhancedContext: TaskContext = None) -> List[TaskAction]:
+ """React mode doesn't use batch action generation - actions are generated iteratively"""
+ # React mode generates actions one at a time in the execution loop
+ return []
+
    async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext,
                         taskIndex: int = None, totalTasks: int = None) -> TaskResult:
        """Execute task using React mode - iterative plan-act-observe-refine loop.

        Each iteration: select one action (_planSelect), execute it
        (_actExecute), condense the outcome (_observeBuild), then ask the AI
        whether to continue (_refineDecide). Adaptive components (intent
        analysis, content validation, learning engine, progress tracker)
        feed into the stop decision. The loop is bounded by
        ``workflow.maxSteps`` (default 5).

        NOTE(review): the returned TaskResult always reports
        COMPLETED/success=True, even when the loop exits via an exception in
        a step - confirm this is intended.
        """
        logger.info(f"=== STARTING TASK {taskIndex or '?'}: {taskStep.objective} ===")

        # NEW: Analyze user intent
        self.currentIntent = self.intentAnalyzer.analyzeUserIntent(taskStep.objective, context)
        logger.info(f"Intent analysis: {self.currentIntent}")

        # NEW: Reset progress tracking for new task
        self.progressTracker.reset()

        # Update workflow object before executing task
        if taskIndex is not None:
            self._updateWorkflowBeforeExecutingTask(taskIndex)

        # Update workflow context for this task
        if taskIndex is not None:
            self.services.workflow.setWorkflowContext(task_number=taskIndex)

        # Create task start message
        await self.messageCreator.createTaskStartMessage(taskStep, workflow, taskIndex, totalTasks)

        state = TaskExecutionState(taskStep)
        # React mode uses max_steps instead of max_retries
        state.max_steps = max(1, int(getattr(workflow, 'maxSteps', 5)))
        logger.info(f"Using React mode execution with max_steps: {state.max_steps}")

        step = 1
        lastReviewDict = None

        while step <= state.max_steps:
            # Raises (and aborts the task) if the user stopped the workflow.
            self._checkWorkflowStopped(workflow)

            # Update workflow[currentAction] for UI
            self._updateWorkflowBeforeExecutingAction(step)
            self.services.workflow.setWorkflowContext(action_number=step)

            try:
                t0 = time.time()
                selection = await self._planSelect(context)
                logger.info(f"React step {step}: Selected action: {selection}")

                # Create user-friendly message BEFORE action execution
                # Action intention message is now handled by the standard message creator in _actExecute

                result = await self._actExecute(context, selection, taskStep, workflow, step)
                observation = self._observeBuild(result)
                # Attach deterministic label for clarity
                observation['resultLabel'] = result.resultLabel

                # NEW: Add content validation
                # Feedback/progress updates run only when documents were produced,
                # since validationResult exists only inside this branch.
                if self.currentIntent and result.documents:
                    validationResult = self.contentValidator.validateContent(result.documents, self.currentIntent)
                    observation['contentValidation'] = validationResult
                    logger.info(f"Content validation: {validationResult['overallSuccess']} (quality: {validationResult['qualityScore']:.2f})")

                    # NEW: Learn from feedback
                    feedback = self._collectFeedback(result, validationResult, self.currentIntent)
                    self.learningEngine.learnFromFeedback(feedback, context, self.currentIntent)

                    # NEW: Update progress
                    self.progressTracker.updateProgress(result, validationResult, self.currentIntent)

                decision = await self._refineDecide(context, observation)

                # Telemetry: simple duration per step
                duration = time.time() - t0
                self.services.interfaceDbChat.createLog({
                    "workflowId": workflow.id,
                    "message": f"react_step_duration_sec={duration:.3f}",
                    "type": "info"
                })
                lastReviewDict = decision

                # Create user-friendly message AFTER action execution
                # Action completion message is now handled by the standard message creator in _actExecute

            except Exception as e:
                # NOTE(review): a first-step failure leaves `observation` unset;
                # this is safe only because we break out immediately here.
                logger.error(f"React step {step} error: {e}")
                break

            # NEW: Use adaptive stopping logic
            progressState = self.progressTracker.getCurrentProgress()
            shouldContinue = self.progressTracker.shouldContinue(progressState, observation.get('contentValidation', {}))

            if not shouldContinue or not should_continue(observation, lastReviewDict, step, state.max_steps):
                logger.info(f"Stopping at step {step}: shouldContinue={shouldContinue}, should_continue={should_continue(observation, lastReviewDict, step, state.max_steps)}")
                break
            step += 1

        # Summarize task result for react mode
        # NOTE(review): status/success are hard-coded regardless of how the loop ended.
        status = TaskStatus.COMPLETED
        success = True
        feedback = lastReviewDict.get('reason') if isinstance(lastReviewDict, dict) else 'Completed'
        if isinstance(lastReviewDict, dict) and lastReviewDict.get('decision') == 'stop':
            success = True

        # Create task completion message
        # (anonymous ReviewResult stand-in with a fixed quality score of 8)
        await self.messageCreator.createTaskCompletionMessage(taskStep, workflow, taskIndex, totalTasks,
                                                             type('ReviewResult', (), {'reason': feedback, 'met_criteria': [], 'quality_score': 8})())

        return TaskResult(
            taskId=taskStep.id,
            status=status,
            success=success,
            feedback=feedback,
            error=None if success else feedback
        )
+
    async def _planSelect(self, context: TaskContext) -> Dict[str, Any]:
        """Plan: select exactly one action. Returns {"action": {method, name}}.

        Builds the action-selection prompt from the user prompt, available
        documents/methods and user language, asks the AI (quality-priority
        plan generation), and parses the first ``{...}`` span of the reply as
        JSON. Raises ValueError when no JSON or no 'action' key is found.
        """
        promptTemplate = createActionSelectionPromptTemplate()

        # Extract content for placeholders
        userPrompt = extractUserPrompt(context)
        # Use same pattern as taskplan mode - extractAvailableDocuments with proper context
        availableDocuments = extractAvailableDocuments(context)
        userLanguage = extractUserLanguage(self.services)
        availableMethods = extractAvailableMethods(self.services)

        # Create placeholders dictionary
        placeholders = {
            "USER_PROMPT": userPrompt,
            "AVAILABLE_DOCUMENTS": availableDocuments,
            "USER_LANGUAGE": userLanguage,
            "AVAILABLE_METHODS": availableMethods
        }

        # NOTE(review): _writeTraceLog is not defined on BaseMode or in the
        # visible part of this class - confirm it exists later in this file.
        self._writeTraceLog("React Plan Selection Prompt", promptTemplate)
        self._writeTraceLog("React Plan Selection Placeholders", placeholders)

        # Centralized AI call for plan selection (use plan generation quality)
        options = AiCallOptions(
            operationType=OperationType.GENERATE_PLAN,
            priority=Priority.QUALITY,
            compressPrompt=False,
            compressContext=False,
            processingMode=ProcessingMode.DETAILED,
            maxCost=0.10,
            maxProcessingTime=30
        )

        response = await self.services.ai.callAi(
            prompt=promptTemplate,
            placeholders=placeholders,
            options=options
        )
        self._writeTraceLog("React Plan Selection Response", response)
        # Outermost {...} span: greedy from first '{' to last '}' - assumes a
        # single JSON object in the reply.
        jsonStart = response.find('{') if response else -1
        jsonEnd = response.rfind('}') + 1 if response else 0
        if jsonStart == -1 or jsonEnd == 0:
            raise ValueError("No JSON in selection response")
        selection = json.loads(response[jsonStart:jsonEnd])
        if 'action' not in selection or not isinstance(selection['action'], dict):
            raise ValueError("Selection missing 'action'")
        return selection
+
    async def _actExecute(self, context: TaskContext, selection: Dict[str, Any], taskStep: TaskStep,
                         workflow: ChatWorkflow, stepIndex: int) -> ActionResult:
        """Act: request minimal parameters then execute selected action.

        If the selection already carries parameters they are used verbatim;
        otherwise the AI is asked for them based on the action's documented
        parameter descriptions. The chosen action is then wrapped in a
        synthetic TaskAction and routed through the shared single-action
        executor (which also handles user-facing messaging).
        """
        action = selection.get('action', {})

        # Check if parameters are already provided in the action selection
        if 'parameters' in action and action['parameters']:
            logger.info("Using parameters from action selection")
            parameters = action['parameters']
        else:
            logger.info("No parameters in action selection, requesting from AI")
            promptTemplate = createActionParameterPromptTemplate()

            # Extract content for placeholders
            userPrompt = extractUserPrompt(context)
            # Use same pattern as taskplan mode - extractAvailableDocuments with proper context
            availableDocuments = extractAvailableDocuments(context)
            userLanguage = extractUserLanguage(self.services)

            # Get available connections for React mode
            availableConnections = getConnectionReferenceList(self.services)
            availableConnectionsStr = '\n'.join(f"- {conn}" for conn in availableConnections) if availableConnections else "No connections available"

            # Get action parameter description (not function signature)
            method = action.get('method', '')
            name = action.get('name', '')
            actionParameters = ""
            # Local import of the promptFactory methods registry - presumably
            # deferred to avoid an import cycle; confirm.
            from modules.workflows.processing.shared.promptFactory import methods
            if self.services and method in methods:
                methodInstance = methods[method]['instance']
                if name in methodInstance.actions:
                    action_info = methodInstance.actions[name]
                    # Extract parameter descriptions from docstring
                    docstring = action_info.get('description', '')
                    paramDescriptions, paramTypes = methodInstance._extractParameterDetails(docstring)

                    param_list = []
                    for paramName, paramDesc in paramDescriptions.items():
                        paramType = paramTypes.get(paramName, 'Any')
                        if paramDesc:
                            param_list.append(f"- {paramName} ({paramType}): {paramDesc}")
                        else:
                            param_list.append(f"- {paramName} ({paramType})")

                    actionParameters = "Required parameters:\n" + "\n".join(param_list)

            selectedAction = f"{method}.{name}"

            # Create placeholders dictionary
            placeholders = {
                "USER_PROMPT": userPrompt,
                "AVAILABLE_DOCUMENTS": availableDocuments,
                "AVAILABLE_CONNECTIONS": availableConnectionsStr,
                "USER_LANGUAGE": userLanguage,
                "SELECTED_ACTION": selectedAction,
                "ACTION_SIGNATURE": actionParameters
            }

            self._writeTraceLog("React Parameters Prompt", promptTemplate)
            self._writeTraceLog("React Parameters Placeholders", placeholders)

            # Centralized AI call for parameter suggestion (balanced analysis)
            options = AiCallOptions(
                operationType=OperationType.ANALYSE_CONTENT,
                priority=Priority.BALANCED,
                compressPrompt=True,
                compressContext=False,
                processingMode=ProcessingMode.ADVANCED,
                maxCost=0.05,
                maxProcessingTime=30
            )

            paramsResp = await self.services.ai.callAi(
                prompt=promptTemplate,
                placeholders=placeholders,
                options=options
            )
            self._writeTraceLog("React Parameters Response", paramsResp)

            # Parse JSON response (first '{' to last '}'); fall back to {} on failure
            js = paramsResp[paramsResp.find('{'):paramsResp.rfind('}')+1] if paramsResp else '{}'
            try:
                paramObj = json.loads(js)
                parameters = paramObj.get('parameters', {}) if isinstance(paramObj, dict) else {}
            except Exception as e:
                logger.error(f"Failed to parse AI parameters response as JSON: {str(e)}")
                logger.error(f"Response was: {paramsResp}")
                parameters = {}

        # Apply minimal defaults in-code (language)
        if 'language' not in parameters and hasattr(self.services, 'user') and getattr(self.services.user, 'language', None):
            parameters['language'] = self.services.user.language

        # Build a synthetic TaskAction for execution routing and labels
        currentRound = getattr(self.workflow, 'currentRound', 0)
        currentTask = getattr(self.workflow, 'currentTask', 0)
        resultLabel = f"round{currentRound}_task{currentTask}_action{stepIndex}_results"

        # NOTE(review): _createTaskAction returns None on failure; that None is
        # passed straight into executeSingleAction - confirm it handles it.
        taskAction = self._createTaskAction({
            "execMethod": action.get('method', ''),
            "execAction": action.get('name', ''),
            "execParameters": parameters,
            "execResultLabel": resultLabel,
            "status": TaskStatus.PENDING
        })

        # Execute using existing single action flow (message creation is handled internally)
        result = await self.actionExecutor.executeSingleAction(taskAction, workflow, taskStep, currentTask, stepIndex, 1)

        return result
+
+ def _observeBuild(self, actionResult: ActionResult) -> Dict[str, Any]:
+ """Observe: build compact observation object from ActionResult with full document metadata"""
+ previews = []
+ notes = []
+ if actionResult and actionResult.documents:
+ # Process all documents and show full metadata
+ for doc in actionResult.documents:
+ # Extract all available metadata without content
+ docMetadata = {
+ "name": getattr(doc, 'documentName', 'Unknown'),
+ "mimeType": getattr(doc, 'mimeType', 'Unknown'),
+ "size": getattr(doc, 'size', 'Unknown'),
+ "created": getattr(doc, 'created', 'Unknown'),
+ "modified": getattr(doc, 'modified', 'Unknown'),
+ "typeGroup": getattr(doc, 'typeGroup', 'Unknown'),
+ "documentId": getattr(doc, 'documentId', 'Unknown'),
+ "reference": getattr(doc, 'reference', 'Unknown')
+ }
+ # Remove 'Unknown' values to keep it clean
+ docMetadata = {k: v for k, v in docMetadata.items() if v != 'Unknown'}
+
+ # Add content size indicator instead of actual content
+ if hasattr(doc, 'documentData') and doc.documentData:
+ if isinstance(doc.documentData, dict) and 'content' in doc.documentData:
+ contentLength = len(str(doc.documentData['content']))
+ docMetadata['contentSize'] = f"{contentLength} characters"
+ else:
+ contentLength = len(str(doc.documentData))
+ docMetadata['contentSize'] = f"{contentLength} characters"
+
+ # Extract comment if available
+ if hasattr(doc, 'documentData') and doc.documentData:
+ data = getattr(doc, 'documentData', None)
+ if isinstance(data, dict):
+ comment = data.get("comment", "")
+ if comment:
+ notes.append(f"Document '{docMetadata.get('name', 'Unknown')}': {comment}")
+
+ previews.append(docMetadata)
+
+ observation = {
+ "success": bool(actionResult.success),
+ "resultLabel": actionResult.resultLabel or "",
+ "documentsCount": len(actionResult.documents) if actionResult.documents else 0,
+ "previews": previews,
+ "notes": notes
+ }
+
+ # NEW: Add content analysis if intent is available
+ if self.currentIntent and actionResult.documents:
+ contentAnalysis = self._analyzeContent(actionResult.documents)
+ observation['contentAnalysis'] = contentAnalysis
+
+ return observation
+
+ def _analyzeContent(self, documents: List[Any]) -> Dict[str, Any]:
+ """Analyzes content of documents for adaptive learning"""
+ try:
+ if not documents:
+ return {"contentType": "none", "contentSnippet": "", "intentMatch": False}
+
+ # Extract content from first document
+ firstDoc = documents[0]
+ content = ""
+ if hasattr(firstDoc, 'documentData'):
+ data = firstDoc.documentData
+ if isinstance(data, dict) and 'content' in data:
+ content = str(data['content'])
+ else:
+ content = str(data)
+
+ # Classify content type
+ contentType = self._classifyContent(content)
+
+ # Create content snippet
+ contentSnippet = content[:200] + "..." if len(content) > 200 else content
+
+ # Assess intent match
+ intentMatch = self._assessIntentMatch(content, self.currentIntent)
+
+ return {
+ "contentType": contentType,
+ "contentSnippet": contentSnippet,
+ "intentMatch": intentMatch
+ }
+
+ except Exception as e:
+ logger.error(f"Error analyzing content: {str(e)}")
+ return {"contentType": "error", "contentSnippet": "", "intentMatch": False}
+
+ def _classifyContent(self, content: str) -> str:
+ """Classifies the type of content"""
+ if not content:
+ return "empty"
+
+ # Check for code
+ codeIndicators = ['def ', 'function', 'import ', 'class ', 'for ', 'while ', 'if ']
+ if any(indicator in content.lower() for indicator in codeIndicators):
+ return "code"
+
+ # Check for numbers
+ if re.search(r'\b\d+\b', content):
+ return "numbers"
+
+ # Check for structured content
+ if any(indicator in content for indicator in ['\n', '\t', '|', '-', '*', '1.', '2.']):
+ return "structured"
+
+ # Default to text
+ return "text"
+
+ def _assessIntentMatch(self, content: str, intent: Dict[str, Any]) -> bool:
+ """Assesses if content matches the user intent"""
+ if not intent:
+ return False
+
+ dataType = intent.get("dataType", "unknown")
+
+ if dataType == "numbers":
+ # Check if content contains actual numbers, not code
+ hasNumbers = bool(re.search(r'\b\d+\b', content))
+ isNotCode = not any(keyword in content.lower() for keyword in ['def ', 'function', 'import '])
+ return hasNumbers and isNotCode
+
+ elif dataType == "text":
+ # Check if content is readable text
+ words = re.findall(r'\b\w+\b', content)
+ return len(words) > 5
+
+ elif dataType == "documents":
+ # Check if content is suitable for document creation
+ hasStructure = any(indicator in content for indicator in ['\n', '\t', '|', '-', '*'])
+ hasContent = len(content.strip()) > 50
+ return hasStructure and hasContent
+
+ return True # Default to match for unknown types
+
+ def _collectFeedback(self, result: Any, validation: Dict[str, Any], intent: Dict[str, Any]) -> Dict[str, Any]:
+ """Collects comprehensive feedback from action execution"""
+ try:
+ # Extract content summary
+ contentDelivered = ""
+ if result.documents:
+ firstDoc = result.documents[0]
+ if hasattr(firstDoc, 'documentData'):
+ data = firstDoc.documentData
+ if isinstance(data, dict) and 'content' in data:
+ content = str(data['content'])
+ contentDelivered = content[:100] + "..." if len(content) > 100 else content
+ else:
+ contentDelivered = str(data)[:100] + "..." if len(str(data)) > 100 else str(data)
+
+ return {
+ "actionAttempted": result.resultLabel or "unknown",
+ "parametersUsed": {}, # Would be extracted from action context
+ "contentDelivered": contentDelivered,
+ "intentMatchScore": validation.get('qualityScore', 0),
+ "qualityScore": validation.get('qualityScore', 0),
+ "issuesFound": validation.get('improvementSuggestions', []),
+ "learningOpportunities": validation.get('improvementSuggestions', []),
+ "userSatisfaction": None, # Would be collected from user feedback
+ "timestamp": datetime.now(timezone.utc).timestamp()
+ }
+
+ except Exception as e:
+ logger.error(f"Error collecting feedback: {str(e)}")
+ return {
+ "actionAttempted": "unknown",
+ "parametersUsed": {},
+ "contentDelivered": "",
+ "intentMatchScore": 0,
+ "qualityScore": 0,
+ "issuesFound": [],
+ "learningOpportunities": [],
+ "userSatisfaction": None,
+ "timestamp": datetime.now(timezone.utc).timestamp()
+ }
+
    async def _refineDecide(self, context: TaskContext, observation: Dict[str, Any]) -> Dict[str, Any]:
        """Refine: decide continue or stop, with reason.

        Builds a review prompt from the observation (enriched with content
        validation, content analysis and progress state when present) and
        asks the AI for a JSON decision. On any parse failure the loop
        defaults to {"decision": "continue", "reason": "default"}.
        """
        promptTemplate = createRefinementPromptTemplate()

        # Extract content for placeholders
        userPrompt = extractUserPrompt(context)

        # Create proper ReviewContext for extractReviewContent
        from modules.datamodels.datamodelWorkflow import ReviewContext
        reviewContext = ReviewContext(
            task_step=context.task_step,
            task_actions=[],
            action_results=[],  # React mode doesn't have action results in this context
            step_result={'observation': observation},
            workflow_id=context.workflow_id,
            previous_results=[]
        )
        reviewContent = extractReviewContent(reviewContext)

        # NEW: Add content validation to review content
        enhancedReviewContent = reviewContent
        if 'contentValidation' in observation:
            validation = observation['contentValidation']
            enhancedReviewContent += f"\n\nCONTENT VALIDATION:\n"
            enhancedReviewContent += f"Overall Success: {validation['overallSuccess']}\n"
            enhancedReviewContent += f"Quality Score: {validation['qualityScore']:.2f}\n"
            if validation['improvementSuggestions']:
                enhancedReviewContent += f"Improvement Suggestions: {', '.join(validation['improvementSuggestions'])}\n"

        # NEW: Add content analysis to review content
        if 'contentAnalysis' in observation:
            analysis = observation['contentAnalysis']
            enhancedReviewContent += f"\nCONTENT ANALYSIS:\n"
            enhancedReviewContent += f"Content Type: {analysis['contentType']}\n"
            enhancedReviewContent += f"Intent Match: {analysis['intentMatch']}\n"
            if analysis['contentSnippet']:
                enhancedReviewContent += f"Content Preview: {analysis['contentSnippet']}\n"

        # NEW: Add progress state to review content
        progressState = self.progressTracker.getCurrentProgress()
        enhancedReviewContent += f"\nPROGRESS STATE:\n"
        enhancedReviewContent += f"Completed Objectives: {len(progressState['completedObjectives'])}\n"
        enhancedReviewContent += f"Partial Achievements: {len(progressState['partialAchievements'])}\n"
        enhancedReviewContent += f"Failed Attempts: {len(progressState['failedAttempts'])}\n"
        enhancedReviewContent += f"Current Phase: {progressState['currentPhase']}\n"
        if progressState['nextActionsSuggested']:
            enhancedReviewContent += f"Next Action Suggestions: {', '.join(progressState['nextActionsSuggested'])}\n"

        # Create placeholders dictionary
        placeholders = {
            "USER_PROMPT": userPrompt,
            "REVIEW_CONTENT": enhancedReviewContent
        }

        self._writeTraceLog("React Refinement Prompt", promptTemplate)
        self._writeTraceLog("React Refinement Placeholders", placeholders)

        # Centralized AI call for refinement decision (balanced analysis)
        options = AiCallOptions(
            operationType=OperationType.ANALYSE_CONTENT,
            priority=Priority.BALANCED,
            compressPrompt=True,
            compressContext=False,
            processingMode=ProcessingMode.ADVANCED,
            maxCost=0.05,
            maxProcessingTime=30
        )

        resp = await self.services.ai.callAi(
            prompt=promptTemplate,
            placeholders=placeholders,
            options=options
        )
        self._writeTraceLog("React Refinement Response", resp)
        # First '{' to last '}' - assumes a single JSON object in the reply.
        js = resp[resp.find('{'):resp.rfind('}')+1] if resp else '{}'
        try:
            decision = json.loads(js)
        except Exception:
            decision = {"decision": "continue", "reason": "default"}
        return decision
+
+ async def _createReactActionMessage(self, workflow: ChatWorkflow, selection: Dict[str, Any],
+ step: int, maxSteps: int, taskIndex: int, messageType: str,
+ result: ActionResult = None, observation: Dict[str, Any] = None):
+ """Create user-friendly messages for React workflow actions"""
+ try:
+ action = selection.get('action', {})
+ method = action.get('method', '')
+ actionName = action.get('name', '')
+
+ # Get user language
+ userLanguage = self.services.user.language if self.services and self.services.user else 'en'
+
+ if messageType == "before":
+ # Message BEFORE action execution
+ userMessage = await self._generateActionIntentionMessage(method, actionName, userLanguage)
+ messageContent = f"π **Step {step}/{maxSteps}**\n\n{userMessage}"
+ status = "step"
+ actionProgress = "pending"
+ documentsLabel = f"action_{step}_intention"
+
+ elif messageType == "after":
+ # Message AFTER action execution
+ userMessage = await self._generateActionResultMessage(method, actionName, result, observation, userLanguage)
+ successIcon = "β
" if result and result.success else "β"
+ messageContent = f"{successIcon} **Step {step}/{maxSteps} Complete**\n\n{userMessage}"
+ status = "step"
+ actionProgress = "success" if result and result.success else "fail"
+ documentsLabel = observation.get('resultLabel') if observation else f"action_{step}_result"
+ else:
+ return
+
+ # Create workflow message
+ messageData = {
+ "workflowId": workflow.id,
+ "role": "assistant",
+ "message": messageContent,
+ "status": status,
+ "sequenceNr": len(workflow.messages) + 1,
+ "publishedAt": self.services.utils.getUtcTimestamp(),
+ "documentsLabel": documentsLabel,
+ "documents": [],
+ "roundNumber": workflow.currentRound,
+ "taskNumber": taskIndex,
+ "actionNumber": step,
+ "actionProgress": actionProgress
+ }
+
+ message = self.services.interfaceDbChat.createMessage(messageData)
+ if message:
+ workflow.messages.append(message)
+
+ except Exception as e:
+ logger.error(f"Error creating React action message: {str(e)}")
+
    async def _generateActionIntentionMessage(self, method: str, actionName: str, userLanguage: str):
        """Generate user-friendly message explaining what action will do.

        Asks the AI (speed priority, tight cost/time caps) for a one-line,
        localized description of the upcoming action; falls back to a generic
        "Executing ..." string on any failure or empty reply.
        """
        try:
            # Create a simple AI prompt to generate user-friendly action descriptions
            prompt = f"""Generate a brief, user-friendly message explaining what the {method}.{actionName} action will do.

User language: {userLanguage}

Examples:
- For ai.process: "I'll analyze the content and provide insights"
- For document.extract: "I'll extract the key information from the documents"
- For document.generate: "I'll create a formatted report from the documents"
- For outlook.composeEmail: "I'll compose an email based on your requirements"
- For outlook.sendEmail: "I'll send the composed email"
- For sharepoint.findDocumentPath: "I'll search for the requested documents"
- For sharepoint.readDocuments: "I'll read the document contents"

Return only the user-friendly message, no technical details."""

            # Call AI to generate user-friendly message
            response = await self.services.ai.callAi(
                prompt=prompt,
                options=AiCallOptions(
                    operationType=OperationType.GENERATE_CONTENT,
                    priority=Priority.SPEED,
                    compressPrompt=True,
                    maxCost=0.01,
                    maxProcessingTime=5
                )
            )

            return response.strip() if response else f"Executing {method}.{actionName} action..."

        except Exception as e:
            logger.error(f"Error generating action intention message: {str(e)}")
            return f"Executing {method}.{actionName} action..."
+
    async def _generateActionResultMessage(self, method: str, actionName: str, result: ActionResult,
                                          observation: Dict[str, Any], userLanguage: str):
        """Generate user-friendly message explaining action results.

        Summarizes the outcome (success flag plus produced-document count)
        and asks the AI for a short localized message; falls back to a
        generic "... action completed" string on failure or empty reply.
        """
        try:
            # Build result context (prefer the live result, else the observation)
            resultContext = ""
            if result and result.documents:
                docCount = len(result.documents)
                resultContext = f"Generated {docCount} document(s)"
            elif observation and observation.get('documentsCount', 0) > 0:
                docCount = observation.get('documentsCount', 0)
                resultContext = f"Generated {docCount} document(s)"

            # Create AI prompt for result message
            prompt = f"""Generate a brief, user-friendly message explaining the result of the {method}.{actionName} action.

User language: {userLanguage}
Success: {result.success if result else 'Unknown'}
Result context: {resultContext}

Examples:
- For successful ai.process: "Analysis complete! I've processed the content and generated insights."
- For successful document.extract: "Extraction complete! I've extracted the key information from the documents."
- For successful document.generate: "Report generated! I've created a formatted document with the requested content."
- For successful outlook.composeEmail: "Email composed! I've prepared the email content for sending."
- For successful outlook.sendEmail: "Email sent! The message has been delivered successfully."
- For failed actions: "The action encountered an issue. Please check the details."

Return only the user-friendly message, no technical details."""

            # Call AI to generate user-friendly result message
            response = await self.services.ai.callAi(
                prompt=prompt,
                options=AiCallOptions(
                    operationType=OperationType.GENERATE_CONTENT,
                    priority=Priority.SPEED,
                    compressPrompt=True,
                    maxCost=0.01,
                    maxProcessingTime=5
                )
            )

            return response.strip() if response else f"{method}.{actionName} action completed"

        except Exception as e:
            logger.error(f"Error generating action result message: {str(e)}")
            return f"{method}.{actionName} action completed"
+
    def _createTaskAction(self, actionData: Dict[str, Any]) -> TaskAction:
        """Creates a new task action for React mode.

        Validates/defaults the incoming dict (id, status, execParameters),
        persists the simple fields via recordCreate, and returns the hydrated
        TaskAction model. Returns None when required fields are missing or
        persistence fails.

        NOTE(review): an identical second definition of this method appears
        later in this class and silently overrides this one - deduplicate.
        """
        try:
            import uuid

            # Ensure ID is present
            if "id" not in actionData or not actionData["id"]:
                actionData["id"] = f"action_{uuid.uuid4()}"

            # Ensure required fields
            if "status" not in actionData:
                actionData["status"] = TaskStatus.PENDING

            if "execMethod" not in actionData:
                logger.error("execMethod is required for task action")
                return None

            if "execAction" not in actionData:
                logger.error("execAction is required for task action")
                return None

            if "execParameters" not in actionData:
                actionData["execParameters"] = {}

            # Use generic field separation based on TaskAction model
            # NOTE(review): objectFields is computed but never persisted here.
            simpleFields, objectFields = self.services.interfaceDbChat._separate_object_fields(TaskAction, actionData)

            # Create action in database
            createdAction = self.services.interfaceDbChat.db.recordCreate(TaskAction, simpleFields)

            # Convert to TaskAction model
            return TaskAction(
                id=createdAction["id"],
                execMethod=createdAction["execMethod"],
                execAction=createdAction["execAction"],
                execParameters=createdAction.get("execParameters", {}),
                execResultLabel=createdAction.get("execResultLabel"),
                expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
                status=createdAction.get("status", TaskStatus.PENDING),
                error=createdAction.get("error"),
                retryCount=createdAction.get("retryCount", 0),
                retryMax=createdAction.get("retryMax", 3),
                processingTime=createdAction.get("processingTime"),
                timestamp=float(createdAction.get("timestamp", self.services.utils.getUtcTimestamp())),
                result=createdAction.get("result"),
                resultDocuments=createdAction.get("resultDocuments", []),
                userMessage=createdAction.get("userMessage")
            )

        except Exception as e:
            logger.error(f"Error creating task action: {str(e)}")
            return None
+
+ def _updateWorkflowBeforeExecutingTask(self, taskNumber: int):
+ """Update workflow object before executing a task"""
+ try:
+ updateData = {
+ "currentTask": taskNumber,
+ "currentAction": 0,
+ "totalActions": 0
+ }
+
+ # Update workflow object
+ self.workflow.currentTask = taskNumber
+ self.workflow.currentAction = 0
+ self.workflow.totalActions = 0
+
+ # Update in database
+ self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+ logger.info(f"Updated workflow {self.workflow.id} before executing task {taskNumber}: {updateData}")
+
+ except Exception as e:
+ logger.error(f"Error updating workflow before executing task: {str(e)}")
+
+ def _updateWorkflowBeforeExecutingAction(self, actionNumber: int):
+ """Update workflow object before executing an action"""
+ try:
+ updateData = {
+ "currentAction": actionNumber
+ }
+
+ # Update workflow object
+ self.workflow.currentAction = actionNumber
+
+ # Update in database
+ self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+ logger.info(f"Updated workflow {self.workflow.id} before executing action {actionNumber}: {updateData}")
+
+ except Exception as e:
+ logger.error(f"Error updating workflow before executing action: {str(e)}")
+
+ def _createTaskAction(self, actionData: Dict[str, Any]) -> TaskAction:
+        """Creates a new task action for React mode (NOTE/review: exact duplicate of the _createTaskAction defined earlier in this class — this later definition silently overrides the former; one copy should be removed)"""
+ try:
+ import uuid
+
+ # Ensure ID is present
+ if "id" not in actionData or not actionData["id"]:
+ actionData["id"] = f"action_{uuid.uuid4()}"
+
+ # Ensure required fields
+ if "status" not in actionData:
+ actionData["status"] = TaskStatus.PENDING
+
+ if "execMethod" not in actionData:
+ logger.error("execMethod is required for task action")
+ return None
+
+ if "execAction" not in actionData:
+ logger.error("execAction is required for task action")
+ return None
+
+ if "execParameters" not in actionData:
+ actionData["execParameters"] = {}
+
+ # Use generic field separation based on TaskAction model
+ simpleFields, objectFields = self.services.interfaceDbChat._separate_object_fields(TaskAction, actionData)
+
+ # Create action in database
+ createdAction = self.services.interfaceDbChat.db.recordCreate(TaskAction, simpleFields)
+
+ # Convert to TaskAction model
+ return TaskAction(
+ id=createdAction["id"],
+ execMethod=createdAction["execMethod"],
+ execAction=createdAction["execAction"],
+ execParameters=createdAction.get("execParameters", {}),
+ execResultLabel=createdAction.get("execResultLabel"),
+ expectedDocumentFormats=createdAction.get("expectedDocumentFormats"),
+ status=createdAction.get("status", TaskStatus.PENDING),
+ error=createdAction.get("error"),
+ retryCount=createdAction.get("retryCount", 0),
+ retryMax=createdAction.get("retryMax", 3),
+ processingTime=createdAction.get("processingTime"),
+ timestamp=float(createdAction.get("timestamp", self.services.utils.getUtcTimestamp())),
+ result=createdAction.get("result"),
+ resultDocuments=createdAction.get("resultDocuments", []),
+ userMessage=createdAction.get("userMessage")
+ )
+
+ except Exception as e:
+ logger.error(f"Error creating task action: {str(e)}")
+ return None
+
+ def _writeTraceLog(self, contextText: str, data: Any) -> None:
+ """Write trace data to configured trace file if in debug mode"""
+ try:
+ import os
+ import json
+ from datetime import datetime, UTC
+
+ # Only write if logger is in debug mode
+            if not logger.isEnabledFor(logging.DEBUG):
+ return
+
+ # Get log directory from configuration
+ logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
+ if not os.path.isabs(logDir):
+ # If relative path, make it relative to the gateway directory
+ gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
+ logDir = os.path.join(gatewayDir, logDir)
+
+ # Ensure log directory exists
+ os.makedirs(logDir, exist_ok=True)
+
+ # Create trace file path
+ traceFile = os.path.join(logDir, "log_trace.log")
+
+ # Format the trace entry
+ timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
+ traceEntry = f"[{timestamp}] {contextText}\n"
+
+ # Add data if provided - show full content without truncation
+ if data is not None:
+ if isinstance(data, (dict, list)):
+ # Use ensure_ascii=False to preserve Unicode characters and indent=2 for readability
+ traceEntry += f"Data: {json.dumps(data, indent=2, default=str, ensure_ascii=False)}\n"
+ else:
+ # For string data, show full content without truncation
+ traceEntry += f"Data: {str(data)}\n"
+
+ traceEntry += "-" * 80 + "\n\n"
+
+ # Write to trace file
+ with open(traceFile, "a", encoding="utf-8") as f:
+ f.write(traceEntry)
+
+ except Exception as e:
+ # Don't log trace errors to avoid recursion
+ pass
diff --git a/modules/workflows/processing/promptFactory.py b/modules/workflows/processing/promptFactory.py
deleted file mode 100644
index 1315575e..00000000
--- a/modules/workflows/processing/promptFactory.py
+++ /dev/null
@@ -1,1098 +0,0 @@
-# promptFactory.py
-# Contains all prompt creation functions
-
-import json
-import logging
-import importlib
-import pkgutil
-import inspect
-from typing import Any, Dict, List
-from modules.datamodels.datamodelWorkflow import TaskContext, ReviewContext, DocumentExchange
-from modules.datamodels.datamodelChat import ChatDocument
-from modules.services.serviceGeneration.subDocumentUtility import getFileExtension
-from modules.workflows.methods.methodBase import MethodBase
-
-# Set up logger
-logger = logging.getLogger(__name__)
-
-# Global methods catalog - moved from serviceCenter
-methods = {}
-
-def _discoverMethods(serviceCenter):
- """Dynamically discover all method classes and their actions in modules methods package"""
- try:
- # Import the methods package
- methodsPackage = importlib.import_module('modules.workflows.methods')
-
- # Discover all modules in the package
- for _, name, isPkg in pkgutil.iter_modules(methodsPackage.__path__):
- if not isPkg and name.startswith('method'):
- try:
- # Import the module
- module = importlib.import_module(f'modules.workflows.methods.{name}')
-
- # Find all classes in the module that inherit from MethodBase
- for itemName, item in inspect.getmembers(module):
- if (inspect.isclass(item) and
- issubclass(item, MethodBase) and
- item != MethodBase):
- # Instantiate the method
- methodInstance = item(serviceCenter)
-
- # Discover actions from public methods
- actions = {}
- for methodName, method in inspect.getmembers(type(methodInstance), predicate=inspect.iscoroutinefunction):
- if not methodName.startswith('_'):
- # Bind the method to the instance
- bound_method = method.__get__(methodInstance, type(methodInstance))
- sig = inspect.signature(method)
- params = {}
- for paramName, param in sig.parameters.items():
- if paramName not in ['self']:
- # Get parameter type
- paramType = param.annotation if param.annotation != param.empty else Any
-
- # Get parameter description from docstring or default
- paramDesc = None
- if param.default != param.empty and hasattr(param.default, '__doc__'):
- paramDesc = param.default.__doc__
-
- params[paramName] = {
- 'type': paramType,
- 'required': param.default == param.empty,
- 'description': paramDesc,
- 'default': param.default if param.default != param.empty else None
- }
-
- actions[methodName] = {
- 'description': method.__doc__ or '',
- 'parameters': params,
- 'method': bound_method
- }
-
- # Add method instance with discovered actions
- methods[methodInstance.name] = {
- 'instance': methodInstance,
- 'description': methodInstance.description,
- 'actions': actions
- }
- logger.info(f"Discovered method: {methodInstance.name} with {len(actions)} actions")
-
- except Exception as e:
- logger.error(f"Error loading method module {name}: {str(e)}", exc_info=True)
-
- except Exception as e:
- logger.error(f"Error discovering methods: {str(e)}")
-
-def getMethodsList(serviceCenter) -> List[str]:
- """Get list of available methods with their signatures in the required format"""
- # Initialize methods if not already done
- if not methods:
- _discoverMethods(serviceCenter)
-
- methodList = []
- for methodName, method in methods.items():
- methodInstance = method['instance']
- for actionName, action in method['actions'].items():
- # Use the new signature format from MethodBase
- signature = methodInstance.getActionSignature(actionName)
- if signature:
- methodList.append(signature)
- return methodList
-
-def getEnhancedDocumentContext(serviceCenter) -> str:
- """Get enhanced document context formatted for action planning prompts with proper docList and docItem references"""
- try:
- document_list = serviceCenter.getDocumentReferenceList()
-
- # Build technical context string for AI action planning
- context = "AVAILABLE DOCUMENTS:\n\n"
-
- # Process chat exchanges (current round)
- if document_list["chat"]:
- context += "CURRENT ROUND DOCUMENTS:\n"
- for exchange in document_list["chat"]:
- # Generate docList reference for the exchange (using message ID and label)
- # Find the message that corresponds to this exchange
- message_id = None
- for message in serviceCenter.workflow.messages:
- if hasattr(message, 'documentsLabel') and message.documentsLabel == exchange.documentsLabel:
- message_id = message.id
- break
-
- if message_id:
- doc_list_ref = f"docList:{message_id}:{exchange.documentsLabel}"
- else:
- # Fallback to label-only format if message ID not found
- doc_list_ref = f"docList:{exchange.documentsLabel}"
-
- logger.debug(f"Using document label for action planning: {exchange.documentsLabel} (message_id: {message_id})")
- context += f"- {doc_list_ref} contains:\n"
- # Generate docItem references for each document in the list
- for doc_ref in exchange.documents:
- if doc_ref.startswith("docItem:"):
- context += f" - {doc_ref}\n"
- else:
- # Convert to proper docItem format if needed
- context += f" - docItem:{doc_ref}\n"
- context += "\n"
-
- # Process history exchanges (previous rounds)
- if document_list["history"]:
- context += "WORKFLOW HISTORY DOCUMENTS:\n"
- for exchange in document_list["history"]:
- # Generate docList reference for the exchange (using message ID and label)
- # Find the message that corresponds to this exchange
- message_id = None
- for message in serviceCenter.workflow.messages:
- if hasattr(message, 'documentsLabel') and message.documentsLabel == exchange.documentsLabel:
- message_id = message.id
- break
-
- if message_id:
- doc_list_ref = f"docList:{message_id}:{exchange.documentsLabel}"
- else:
- # Fallback to label-only format if message ID not found
- doc_list_ref = f"docList:{exchange.documentsLabel}"
-
- logger.debug(f"Using history document label for action planning: {exchange.documentsLabel} (message_id: {message_id})")
- context += f"- {doc_list_ref} contains:\n"
- # Generate docItem references for each document in the list
- for doc_ref in exchange.documents:
- if doc_ref.startswith("docItem:"):
- context += f" - {doc_ref}\n"
- else:
- # Convert to proper docItem format if needed
- context += f" - docItem:{doc_ref}\n"
- context += "\n"
-
- if not document_list["chat"] and not document_list["history"]:
- context += "NO DOCUMENTS AVAILABLE - This workflow has no documents to process.\n"
-
- return context
-
- except Exception as e:
- logger.error(f"Error generating enhanced document context: {str(e)}")
- return "NO DOCUMENTS AVAILABLE - Error generating document context."
-
-# Prompt creation helpers
-
-def _getAvailableDocuments(workflow) -> str:
- """
- Get simple description of available documents for task planning.
-
- Args:
- workflow: ChatWorkflow object
-
- Returns:
- str: Simple description of document availability
- """
- total_documents = 0
- document_types = set()
-
- for message in workflow.messages:
- if message.documents:
- total_documents += len(message.documents)
- for doc in message.documents:
- try:
- file_extension = getFileExtension(doc.fileName)
- if file_extension:
- document_types.add(file_extension.upper())
- except:
- pass
-
- if total_documents == 0:
- return "No documents available"
- elif len(document_types) == 0:
- return f"{total_documents} document(s) available"
- else:
- types_str = ", ".join(sorted(document_types))
- return f"{total_documents} document(s) available ({types_str} files)"
-
-def _getConnectionReferenceList(service) -> List[str]:
- """Get list of all UserConnection objects as references with enhanced state information"""
- connections = []
- # Get user connections
- user_connections = service.interfaceDbApp.getUserConnections(service.user.id)
-
- refreshed_count = 0
- for conn in user_connections:
- # Get enhanced connection reference with state information
- enhanced_ref = service.getConnectionReferenceFromUserConnection(conn)
- connections.append(enhanced_ref)
-
- # Count refreshed tokens
- if "refreshed" in enhanced_ref:
- refreshed_count += 1
-
- # Sort by connection reference
- if refreshed_count > 0:
- logger.info(f"Refreshed {refreshed_count} connection tokens while building action planning prompt")
- return sorted(connections)
-
-def _getPreviousRoundContext(service, workflow) -> str:
- """Get context from previous workflow rounds to help understand follow-up prompts"""
- try:
- if not workflow or not hasattr(workflow, 'messages') or not workflow.messages:
- return ""
-
- # Get current round number
- current_round = getattr(workflow, 'currentRound', 0)
-
- # If this is round 0 or 1, there's no previous context
- if current_round <= 1:
- return ""
-
- # Find messages from previous rounds (rounds before current)
- previous_messages = []
- for message in workflow.messages:
- message_round = getattr(message, 'roundNumber', 0)
- if message_round > 0 and message_round < current_round:
- previous_messages.append(message)
-
- if not previous_messages:
- return ""
-
- # Sort by round number and sequence to get chronological order
- previous_messages.sort(key=lambda msg: (getattr(msg, 'roundNumber', 0), getattr(msg, 'sequenceNr', 0)))
-
- # Build context summary
- context_parts = []
- current_round_context = {}
-
- for message in previous_messages:
- round_num = getattr(message, 'roundNumber', 0)
- if round_num not in current_round_context:
- current_round_context[round_num] = {
- 'user_inputs': [],
- 'assistant_responses': [],
- 'task_outcomes': [],
- 'documents_processed': []
- }
-
- # Categorize messages
- if message.role == 'user':
- current_round_context[round_num]['user_inputs'].append(message.message)
- elif message.role == 'assistant':
- # Check if it's a task completion or error message
- if 'task' in message.message.lower() and ('completed' in message.message.lower() or 'failed' in message.message.lower() or 'error' in message.message.lower()):
- current_round_context[round_num]['task_outcomes'].append(message.message)
- else:
- current_round_context[round_num]['assistant_responses'].append(message.message)
-
- # Check for document processing
- if hasattr(message, 'documents') and message.documents:
- doc_names = [doc.fileName for doc in message.documents if hasattr(doc, 'fileName')]
- if doc_names:
- current_round_context[round_num]['documents_processed'].extend(doc_names)
-
- # Build context summary
- for round_num in sorted(current_round_context.keys()):
- round_data = current_round_context[round_num]
- context_parts.append(f"ROUND {round_num} CONTEXT:")
-
- if round_data['user_inputs']:
- context_parts.append(f" User requests: {'; '.join(round_data['user_inputs'])}")
-
- if round_data['task_outcomes']:
- context_parts.append(f" Task outcomes: {'; '.join(round_data['task_outcomes'])}")
-
- if round_data['documents_processed']:
- context_parts.append(f" Documents processed: {', '.join(set(round_data['documents_processed']))}")
-
- if context_parts:
- return "\n".join(context_parts)
- else:
- return ""
-
- except Exception as e:
- logger.error(f"Error getting previous round context: {str(e)}")
- return ""
-
-def createTaskPlanningPrompt(context: TaskContext, service) -> str:
- """Create enhanced prompt for task planning with user-friendly message generation and language detection"""
- # Get user language directly from service.user.language
- user_language = service.user.language if service and service.user else 'en'
-
- # Extract user request from context - use Pydantic model directly
- user_request = context.task_step.objective if context.task_step else 'No request specified'
-
- # Get available documents using generic function
- available_documents = _getAvailableDocuments(context.workflow) if context.workflow else "No documents available"
-
- # Get previous workflow round context for better understanding of follow-up prompts
- previous_round_context = _getPreviousRoundContext(service, context.workflow)
-
- return f"""You are a task planning AI that analyzes user requests and creates structured, self-contained task plans with user-friendly feedback messages.
-
-USER REQUEST: {user_request}
-
-AVAILABLE DOCUMENTS: {available_documents}
-
-PREVIOUS WORKFLOW ROUNDS CONTEXT:
-{previous_round_context if previous_round_context else "No previous workflow rounds - this is the first round."}
-
-INSTRUCTIONS:
-1. Analyze the user request, available documents, and previous workflow rounds context
-2. If the user request appears to be a follow-up (like "try again", "versuche es nochmals", "retry", etc.),
- use the PREVIOUS WORKFLOW ROUNDS CONTEXT to understand what the user wants to retry or continue
-3. Group related topics and sequential steps into single, comprehensive tasks
-4. Focus on business outcomes, not technical operations
-5. Make each task self-contained: clearly state what to do and what outputs are expected
-6. Ensure proper handover between tasks (later actions will use your task outputs)
-7. Detect the language of the user request and include it in languageUserDetected
-8. Generate user-friendly messages for each task in the user's request language
-9. Return a JSON object with the exact structure shown below
-
-TASK GROUPING PRINCIPLES:
-- COMBINE RELATED TOPICS: Group related subjects, sequential steps, or workflow-structured activities into single tasks
-- SEQUENTIAL WORKFLOWS: If the user says "first do this, then that, then that" β create ONE task that handles the entire sequence
-- SIMILAR CONTENT: If multiple items deal with the same subject matter β combine into ONE comprehensive task
-- ONLY SPLIT WHEN DIFFERENT: Create separate tasks ONLY when the user explicitly wants different, independent things
-
-EXAMPLES OF GOOD TASK GROUPING:
-
-COMBINE INTO ONE TASK:
-- "Analyze the documents, extract key insights, and create a summary report" β ONE task: "Analyze documents and create comprehensive summary report"
-- "First check my emails, then respond to urgent ones, then organize my inbox" β ONE task: "Process and organize email inbox with priority responses"
-- "Review the budget, analyze spending patterns, and suggest cost-cutting measures" β ONE task: "Comprehensive budget analysis with optimization recommendations"
-- "Create a business strategy, develop marketing plan, and prepare presentation" β ONE task: "Develop complete business strategy with marketing plan and presentation"
-
-SPLIT INTO MULTIPLE TASKS:
-- "Create a business strategy for Q4" AND "Check my emails for messages from my assistant" β TWO separate tasks (different subjects)
-- "Analyze customer feedback" AND "Prepare quarterly financial report" β TWO separate tasks (different business areas)
-- "Review project timeline" AND "Update employee handbook" β TWO separate tasks (unrelated activities)
-
-TASK PLANNING PRINCIPLES:
-- Break down complex requests into logical, sequential steps
-- Focus on business value and outcomes
-- Keep tasks at a meaningful level of abstraction (not implementation details)
-- Each task should produce results that can be used by subsequent tasks
-- Ensure clear dependencies and handovers between tasks
-- Provide clear, actionable user messages in the user's request language
-- Group related activities to minimize task fragmentation
-- Only create multiple tasks when dealing with truly different, independent objectives
-- Make task objectives action-oriented and specific (include scope, data sources to consider, and output intent at high level)
-- Write success_criteria as measurable acceptance criteria focusing on outputs (what artifacts or insights will exist and how they are validated)
-
-FOLLOW-UP PROMPT HANDLING:
-- If the user request is a follow-up (e.g., "try again", "versuche es nochmals", "retry", "continue", "proceed"),
- analyze the PREVIOUS WORKFLOW ROUNDS CONTEXT to understand what failed or was incomplete
-- Use the previous round's user requests and task outcomes to determine what the user wants to retry
-- If previous rounds failed due to missing documents, and documents are now available,
- create tasks that use the newly available documents to accomplish the original request
-- Maintain the same business objective from previous rounds but adapt to current available resources
-
-SPECIFIC SCENARIO HANDLING:
-- If previous round failed with "documents missing" error and current round has documents available,
- the user likely wants to retry the same operation with the newly provided documents
-- Example: Previous round "speichere mir die 3 dokumente im sharepoint unter xxx" failed due to missing documents,
- current round "versuche es nochmals" with documents should retry the SharePoint save operation
-- Always check if the current request is a retry by looking for retry keywords and previous round context
-
-REQUIRED JSON STRUCTURE:
-{{
- "overview": "Brief description of the overall plan",
- "languageUserDetected": "en", // Language code detected from user request (en, de, fr, it, es, etc.)
- "userMessage": "User-friendly message explaining the task plan in user's request language",
- "tasks": [
- {{
- "id": "task_1",
- "objective": "Clear business objective this task accomplishes (combining related activities)",
- "dependencies": ["task_0"], // IDs of tasks that must complete first
- "success_criteria": ["criteria1", "criteria2"],
- "estimated_complexity": "low|medium|high",
- "userMessage": "User-friendly message explaining what this task will accomplish in user's request language"
- }}
- ]
-}}
-
-EXAMPLES OF GOOD TASK OBJECTIVES (COMBINING RELATED ACTIVITIES):
-- "Analyze documents and extract key insights for business communication"
-- "Create professional business communication incorporating analyzed information"
-- "Execute business communication using specified channels and document outcomes"
-- "Develop comprehensive business strategy with implementation roadmap and success metrics"
-
-EXAMPLES OF WELL-FORMED SUCCESS CRITERIA (OUTPUT-FOCUSED):
-- "Deliver a prioritized list of 10β20 candidates with justification"
-- "Provide a structured JSON with fields: company, ticker, rationale, metrics"
-- "Produce a presentation outline with 5 sections and bullet points per section"
-- "Include data sources and date stamped references for traceability"
-
-EXAMPLES OF GOOD SUCCESS CRITERIA:
-- "Key insights extracted and ready for business use"
-- "Professional communication created with clear business value"
-- "Business communication successfully delivered and documented"
-- "All outcomes properly documented and accessible"
-
-EXAMPLES OF BAD TASK OBJECTIVES:
-- "Read the PDF file" (too granular - should be "Analyze document content")
-- "Convert data to CSV" (implementation detail - should be "Structure data for analysis")
-- "Send email" (too specific - should be "Deliver business communication")
-
-LANGUAGE DETECTION:
-- Analyze the user request text to identify the language
-- Use standard language codes: en (English), de (German), fr (French), it (Italian), es (Spanish), etc.
-- If the language cannot be determined, use "en" as default
-- Include the detected language in the languageUserDetected field
-
-NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
-
-async def createActionDefinitionPrompt(context: TaskContext, service) -> str:
- """Create enhanced prompt for action generation with user-friendly messages and enhanced document context"""
- methodList = getMethodsList(service)
- method_actions = {}
- for sig in methodList:
- if '.' in sig:
- method, rest = sig.split('.', 1)
- action = rest.split('(')[0]
- method_actions.setdefault(method, []).append((action, sig))
-
- messageSummary = await service.methodService.summarizeChat(context.workflow.messages) if context.workflow else ""
-
- # Get enhanced document context using the new method
- available_documents_str = getEnhancedDocumentContext(service)
-
- # Get available documents and connections using generic functions
- available_docs_summary = _getAvailableDocuments(context.workflow)
- connRefs = _getConnectionReferenceList(service)
-
- # Create a structured JSON format for better AI parsing
- # This replaces the old hard-to-read format with a clean JSON structure
- # that the AI can easily parse and understand
- available_methods_json = {}
- for method, actions in method_actions.items():
- available_methods_json[method] = {}
- # Get the method instance for accessing docstrings
- method_instance = methods.get(method, {}).get('instance') if methods else None
-
- for action, sig in actions:
- # Parse the signature to extract parameters
- if '(' in sig and ')' in sig:
- # Extract parameters from signature
- params_start = sig.find('(')
- params_end = sig.find(')')
- params_str = sig[params_start+1:params_end]
-
- # Parse parameters directly from the docstring - much simpler and more reliable!
- parameters = []
-
- # Get the actual function's docstring
- if method_instance and hasattr(method_instance, action):
- func = getattr(method_instance, action)
- if hasattr(func, '__doc__') and func.__doc__:
- docstring = func.__doc__
-
- # Parse Parameters section from docstring
- lines = docstring.split('\n')
- in_parameters = False
- for i, line in enumerate(lines):
- original_line = line
- line = line.strip()
-
- if line == 'Parameters:':
- in_parameters = True
- continue
- elif in_parameters and line and not original_line.startswith(' ') and not original_line.startswith('\t'):
- # End of parameters section
- break
- elif in_parameters and (original_line.startswith(' ') or original_line.startswith('\t')):
- # This is a parameter line - already stripped
- # Format: "paramName (type): description"
- if ':' in line:
- # Find the colon that separates param from description
- colon_pos = line.find(':')
- param_part = line[:colon_pos].strip()
- description = line[colon_pos+1:].strip()
-
- # Parse parameter name and type
- if '(' in param_part and ')' in param_part:
- param_name = param_part.split('(')[0].strip()
- type_part = param_part[param_part.find('(')+1:param_part.find(')')].strip()
-
- # Check if optional
- is_optional = 'optional' in type_part
- param_type = type_part.replace('optional', '').strip().rstrip(',').strip()
-
- parameters.append({
- "name": param_name,
- "type": param_type,
- "description": description,
- "required": not is_optional
- })
-
- available_methods_json[method][action] = {
- "signature": sig,
- "parameters": parameters,
- "description": f"{method}.{action} action"
- }
-
- # Convert to a compact, AI-friendly format
- available_methods_str = f"""
-AVAILABLE ACTIONS (JSON format for better AI parsing):
-{json.dumps(available_methods_json, indent=1, separators=(',', ':'))}
-"""
- retry_context = ""
- if context.retry_count and context.retry_count > 0:
- retry_context = f"""
-RETRY CONTEXT (Attempt {context.retry_count}):
-Previous action results that failed or were incomplete:
-"""
- for i, result in enumerate(context.previous_action_results or []):
- retry_context += f"- Action {i+1}: ActionResult\n"
- retry_context += f" Status: {result.success and 'success' or 'failed'}\n"
- retry_context += f" Error: {result.error or 'None'}\n"
- # Check if result has documents and show document info
- if result.documents:
- doc_info = f"Documents: {len(result.documents)} document(s)"
- if result.documents[0].documentName:
- doc_info += f" - {result.documents[0].documentName}"
- retry_context += f" {doc_info}\n"
- else:
- retry_context += f" Documents: None\n"
-
- if context.previous_review_result:
- retry_context += f"""
-Previous review feedback:
-- Status: {context.previous_review_result.status or 'unknown'}
-- Reason: {context.previous_review_result.reason or 'No reason provided'}
-- Quality Score: {context.previous_review_result.quality_score or 0}/10
-- Unmet Criteria: {', '.join(context.previous_review_result.unmet_criteria or [])}
-"""
-
- # Use Pydantic model directly - no need for getattr
- success_criteria_str = ', '.join(context.task_step.success_criteria) if context.task_step and context.task_step.success_criteria else 'No criteria specified'
- previous_results_str = ', '.join(context.previous_results) if context.previous_results else 'None'
- improvements_str = str(context.improvements) if context.improvements else 'None'
- available_connections_str = '\n'.join(f"- {conn}" for conn in connRefs)
-
- # Get user language from service - this is the correct way
- user_language = service.user.language if service and service.user else 'en'
-
- # Get current workflow context for dynamic examples
- workflow_context = service.methodService.getWorkflowContext()
- current_round = workflow_context.get('currentRound', 0)
- current_task = workflow_context.get('currentTask', 1)
-
- prompt = f"""
-You are an action generation AI that creates specific actions to accomplish a task step with user-friendly messages.
-
-DOCUMENT REFERENCE TYPES:
-- docItem: Reference to a single document
-- docList: Reference to a group of documents
-- round{{round_number}}_task{{task_number}}_action{{action_number}}_{{context}}: Reference to resulting document list from previous action
-
-USAGE GUIDE:
-- Use docItem when you need a specific document: "docItem:doc_123:component_diagram.pdf"
-- Use docList when you need all documents in a group: "docList:msg_456:AnalysisResults"
-- Use round/task/action format when referencing outputs from previous actions: "round{current_round}_task{current_task}_action2_AnalysisResults"
-
-CRITICAL DOCUMENT REFERENCE RULES:
-- ONLY use the exact labels listed in AVAILABLE DOCUMENTS below, or result labels from previous actions
-- When generating multiple actions, you may only use as input documents those that are already present in AVAILABLE DOCUMENTS or produced by actions that come earlier in the list. Do NOT use as input any document label that will be produced by a later action.
-- If there are no documents available, you CANNOT create document extraction actions. Instead, prefer using web actions (web.search, web.scrape, web.crawl) when external information can satisfy the request; only generate a status/information report if the task truly requires user-provided documents.
-
-CURRENT WORKFLOW CONTEXT:
-- Current Round: {current_round}
-- Current Task: {current_task}
-- Use these values when creating resultLabel references
-
-TASK STEP: {context.task_step.objective if context.task_step else 'No task step specified'} (ID: {context.task_step.id if context.task_step else 'unknown'})
-
-SUCCESS CRITERIA: {success_criteria_str}
-
-CONTEXT - Chat History:
-{messageSummary}
-
-WORKFLOW CONTEXT - Previous Messages Summary:
-The following summarizes key information from previous workflow interactions to provide context for continued workflows:
-- Previous user inputs and their outcomes
-- Key decisions and findings from earlier tasks
-- Document processing results and insights
-- User preferences and requirements established
-
-This context helps ensure your actions build upon previous work and maintain consistency with the overall workflow objectives.
-
-AVAILABLE METHODS AND ACTIONS (with signatures):
-{available_methods_str}
-
-AVAILABLE CONNECTIONS:
-{available_connections_str}
-
-AVAILABLE DOCUMENTS:
-{available_documents_str}
-
-DOCUMENT REFERENCE EXAMPLES:
-β
CORRECT: Use exact references from AVAILABLE DOCUMENTS above or result labels from previous actions
-- "docList:msg_456:diagram_analysis_results" (access all documents in a list)
-- "docItem:doc_123:component_diagram.pdf" (access specific document)
-- "round{current_round}_task{current_task}_action3_contextinfo" (document list from previous action)
-
-β INCORRECT: These will cause errors
-- "msg_xxx:documents" (invalid format - missing docList/docItem prefix)
-- "task_2_results" (not a valid reference - use exact references from AVAILABLE DOCUMENTS)
-- Inventing document IDs not produces from a preceeding action
-
-PREVIOUS RESULTS: {previous_results_str}
-IMPROVEMENTS NEEDED: {improvements_str}
-
-PREVIOUS TASK HANDOVER CONTEXT:
-{context.previous_handover.workflowSummary if context.previous_handover and context.previous_handover.workflowSummary else 'No previous task handover available'}
-
-{retry_context}
-
-ACTION GENERATION PRINCIPLES:
-- Create meaningful actions per task step
-- Focus on business outcomes, not technical operations
-- Combine related operations into single actions when possible
-- Select the method that best fulfills the objective based on context (do not default to any specific method).
-- Each action must be self-contained and executable with the provided parameters
-- For document extraction, ensure prompts are specific and detailed
-- Include validation steps in extraction prompts where relevant
-- If this is a retry, learn from previous failures and improve the approach
-- Address specific issues mentioned in previous review feedback
-- When specifying expectedDocumentFormats, ensure AI prompts explicitly request pure data without markdown formatting
-- Generate user-friendly messages for each action in the user's language ({user_language})
-
-PARAMETER COMPLETENESS REQUIREMENTS:
-- Every parameter must contain all information needed to execute without implicit context
-- Use explicit, concrete values (units, languages, formats, limits, date ranges, IDs) when applicable
-- For search-like parameters (if any method requires a query), derive the query from the task objective AND ALL success criteria dimensions. Include:
- - Key entities and domain terms from the objective
- - All distinct facets from success_criteria (e.g., valuation AND AI potential AND know-how needs)
- - Geography/localization (e.g., Schweiz/Suisse/Switzerland; use multilingual synonyms when helpful)
- - Time horizon or recency if relevant
- - Boolean operators and synonyms to increase precision (use AND/OR, quotes, parentheses)
- - Avoid single-topic or generic queries focused only on one facet (e.g., pure valuation metrics)
- - When facets are truly distinct, create 1β3 focused actions with precise queries rather than one vague catch-all
-- Document list parameters must reference only existing labels or prior action outputs; do not reference future outputs
-
-USER LANGUAGE: {user_language} - All user messages must be generated in this language.
-
-DOCUMENT ROUTING GUIDANCE:
-- Each action should produce documents with a clear resultLabel for routing
-- Use consistent naming: "round{current_round}_task{{task_id}}_action{{action_number}}_{{descriptive_label}}"
-- Ensure document flow: Action A produces documents that Action B can consume
-- Document labels should be descriptive of content, not just "results" or "output"
-- Consider what subsequent actions will need and structure outputs accordingly
-
-INSTRUCTIONS:
-- Generate actions to accomplish this task step using available documents, connections, and previous results
-- Use docItem for single documents and docList for groups of documents as shown in AVAILABLE DOCUMENTS
-- If there are no documents available, do not create document extraction actions. Select methods strictly based on the task objective; choose web actions when external information is required. Otherwise, generate a status/information report requesting needed inputs.
-- Always pass documentList as a LIST of references (docItem and/or docList) - this list CANNOT be empty for document extraction actions
-- For referencing documents from previous actions, use the format "round{{round_number}}_task{{task_number}}_action{{action_number}}_{{context}}"
-- For resultLabel, use the format: "round{current_round}_task{{task_id}}_action{{action_number}}_{{short_label}}" where:
- - {{round_number}} = the current round number ({current_round})
- - {{task_id}} = the current task's id ({current_task})
- - {{action_number}} = the sequence number of the action within the task (e.g., 1, 2, 3)
- - {{short_label}} = a short, descriptive label for the output (e.g., "AnalysisResults")
- Example: "round{current_round}_task{current_task}_action1_AnalysisResults"
-- If this is a retry, ensure the new actions address the specific issues from previous attempts
-- Follow the JSON structure below. All fields are required.
-
-REQUIRED JSON STRUCTURE:
-{{
- "actions": [
- {{
- "method": "method_name", // Use only the method name (e.g., "document")
- "action": "action_name", // Use only the action name (e.g., "extract")
- "parameters": {{
- "documentList": ["docItem:doc_abc:round{current_round}_task{current_task}_action1_AnalysisResults", "round{current_round}_task{current_task}_action1_input"],
- "aiPrompt": "Comprehensive AI prompt describing what to accomplish"
- }},
- "resultLabel": "round{current_round}_task{current_task}_action2_AnalysisResults",
- "expectedDocumentFormats": [ // OPTIONAL: Specify expected document formats when needed
- {{
- "extension": ".txt",
- "mimeType": "text/plain",
- "description": "Structured data output"
- }}
- ],
- "description": "What this action accomplishes (business outcome)",
- "userMessage": "User-friendly message explaining what this action will do in the user's language"
- }}
- ]
-}}
-
-FIELD REQUIREMENTS:
-- "method": Must be from AVAILABLE METHODS
-- "action": Must be valid for the method
-- "parameters": Method-specific, must include documentList as a list if required by the signature
-- "resultLabel": Must follow the format above (e.g., "round{current_round}_task{current_task}_action3_AnalysisResults")
-- "expectedDocumentFormats": OPTIONAL - Only specify when you need to control output format
- - Use when you need specific file types (e.g., CSV for data, JSON for structured output)
- - Omit when format is flexible (e.g., folder queries with mixed file types)
- - Each format should specify: extension, mimeType, description
- - When using expectedDocumentFormats, ensure the aiPrompt explicitly requests pure data without markdown formatting
-- "description": Clear summary of the business outcome
-- "userMessage": User-friendly message explaining what the action will accomplish in the user's language
-
-EXAMPLES OF GOOD ACTIONS:
-
-1. Document analysis with specific output format and user message:
-{{
- "method": "document",
- "action": "extract",
- "parameters": {{
- "documentList": ["docItem:doc_57520394-6b6d-41c2-b641-bab3fc6d7f4b:candidate_profile.txt"],
- "aiPrompt": "Extract and analyze the candidate's qualifications, experience, skills, and suitability for the product designer position. Identify key strengths, relevant experience, technical skills, and any areas of concern. Provide a comprehensive assessment that can be used for evaluation."
- }},
- "resultLabel": "round{current_round}_task{current_task}_action2_candidate_analysis",
- "expectedDocumentFormats": [
- {{
- "extension": ".json",
- "mimeType": "application/json",
- "description": "Structured candidate analysis data"
- }}
- ],
- "description": "Comprehensive analysis of candidate profile for evaluation",
- "userMessage": "Ich analysiere das Kandidatenprofil und extrahiere alle wichtigen Informationen fΓΌr die Bewertung."
-}}
-
-2. Multi-document processing with user message:
-{{
- "method": "document",
- "action": "extract",
- "parameters": {{
- "documentList": ["docList:msg_456:candidate_analysis_results"],
- "aiPrompt": "Compare all candidate profiles and create an evaluation matrix. Rate each candidate on technical skills, experience level, cultural fit, portfolio quality, and communication skills. Provide clear rankings and recommendations for the product designer position."
- }},
- "resultLabel": "round{current_round}_task{current_task}_action5_evaluation_matrix",
- "description": "Create comprehensive evaluation matrix comparing all candidates",
- "userMessage": "Ich vergleiche alle Kandidatenprofile und erstelle eine umfassende Bewertungsmatrix mit klaren Empfehlungen."
-}}
-
-3. Data extraction with specific CSV format and user message:
-{{
- "method": "document",
- "action": "extract",
- "parameters": {{
- "documentList": ["docItem:doc_abc:table_data.pdf"],
- "aiPrompt": "Extract all table data and convert to structured CSV format with proper headers and data types. IMPORTANT: Deliver pure CSV data without any markdown formatting, code blocks, or additional text. Output only the CSV content with proper headers and data rows."
- }},
- "resultLabel": "round{current_round}_task{current_task}_action2_structured_data",
- "expectedDocumentFormats": [
- {{
- "extension": ".csv",
- "mimeType": "text/csv",
- "description": "Structured table data in CSV format"
- }}
- ],
- "description": "Extract and structure table data for analysis",
- "userMessage": "Ich extrahiere alle Tabellendaten und konvertiere sie in ein strukturiertes CSV-Format fΓΌr die weitere Analyse."
-}}
-
-4. Comprehensive summary report with user message:
-{{
- "method": "document",
- "action": "generate",
- "parameters": {{
- "documentList": ["docList:msg_456:candidate_analysis_results"],
- "title": "Comprehensive Candidate Evaluation Report"
- }},
- "resultLabel": "round{current_round}_task{current_task}_action6_summary_report",
- "description": "Generate a comprehensive, professional HTML report consolidating all candidate analyses and findings",
- "userMessage": "Ich erstelle einen umfassenden, professionellen Bericht, der alle Kandidatenanalysen und Erkenntnisse zusammenfasst."
-}}
-
-5. Correct chaining of actions within a task:
-{{
- "actions": [
- {{
- "method": "document",
- "action": "extract",
- "parameters": {{
- "documentList": ["docItem:doc_abc:round{current_round}_task{current_task}_action1_file1.txt"],
- "aiPrompt": "Extract data from file1."
- }},
- "resultLabel": "round{current_round}_task{current_task}_action1_extracted_data",
- "description": "Extract data from file1.",
- "userMessage": "Ich extrahiere die Daten aus der Datei."
- }},
- {{
- "method": "document",
- "action": "generate",
- "parameters": {{
- "documentList": ["round{current_round}_task{current_task}_action1_extracted_data"],
- "title": "Report"
- }},
- "resultLabel": "round{current_round}_task{current_task}_action2_report",
- "description": "Generate report from extracted data.",
- "userMessage": "Ich erstelle einen Bericht basierend auf den extrahierten Daten."
- }}
- ]
-}}
-
-IMPORTANT NOTES:
-- Respond with ONLY the JSON object. Do not include any explanatory text.
-- Before creating any document extraction action, verify that AVAILABLE DOCUMENTS contains actual document references.
-- Always include a user-friendly userMessage for each action in the user's language ({user_language}).
-- The examples above show German user messages as reference - adapt the language to match the USER LANGUAGE specified above."""
-
- # Removed sensitive data from debug logging
- logging.debug(f"[ACTION PLAN PROMPT] Document context and methods prepared")
-
- return prompt
-
-def createResultReviewPrompt(context: ReviewContext, service) -> str:
- """Create enhanced prompt for result review with user-friendly messages and document context"""
- # Build comprehensive action and result summary
- action_summary = ""
- for i, action in enumerate(context.task_actions or []):
- action_summary += f"\nACTION {i+1}: {action.execMethod}.{action.execAction}\n"
- action_summary += f" Status: {action.status}\n"
- if action.error:
- action_summary += f" Error: {action.error}\n"
- if action.resultDocuments:
- action_summary += f" Documents: {len(action.resultDocuments)} document(s)\n"
- for doc in action.resultDocuments:
- # Use Pydantic model properties directly
- fileName = doc.fileName
- fileSize = doc.fileSize
- mimeType = doc.mimeType
-
- action_summary += f" - {fileName} ({fileSize} bytes, {mimeType})\n"
- else:
- action_summary += f" Documents: None\n"
-
- # Build result summary with SIMPLE DOCUMENT VALIDATION
- result_summary = ""
- document_validation_summary = ""
- document_access_warnings = []
-
- if context.action_results:
- for i, result in enumerate(context.action_results):
- result_summary += f"\nRESULT {i+1}:\n"
- result_summary += f" Success: {result.success}\n"
- if result.error:
- result_summary += f" Error: {result.error}\n"
-
- if result.documents:
- result_summary += f" Documents: {len(result.documents)} document(s)\n"
- for doc in result.documents:
- # Use correct ActionDocument attributes
- doc_name = getattr(doc, 'documentName', 'Unknown')
- doc_mime = getattr(doc, 'mimeType', 'Unknown')
- doc_data = getattr(doc, 'documentData', None)
-
- result_summary += f" - {doc_name} ({doc_mime})\n"
-
- # SIMPLE VALIDATION: Check if documents exist and have basic properties
- validation_status = "β
Valid"
- if not doc_name or str(doc_name).strip() == "":
- validation_status = "β Missing document name"
- elif not doc_mime or str(doc_mime).strip() == "":
- validation_status = "β Missing MIME type"
- elif doc_data is None:
- validation_status = "β οΈ No document data"
- elif hasattr(doc_data, '__len__') and len(doc_data) == 0:
- validation_status = "β οΈ Empty document data"
-
- document_validation_summary += f" - {doc_name}: {validation_status}\n"
- else:
- result_summary += f" Documents: None\n"
- document_validation_summary += f" - No documents produced\n"
-
- # Get enhanced document context using the new method
- document_context = getEnhancedDocumentContext(service)
-
- # Get user language from service
- user_language = service.user.language if service and service.user else 'en'
-
- # Build warnings section (only for critical issues)
- warnings_section = ""
- if document_access_warnings:
- warnings_section = f"""
-β οΈ DOCUMENT VALIDATION ISSUES:
-{chr(10).join(f"- {warning}" for warning in document_access_warnings)}
-"""
-
- prompt = f"""
-You are a result review AI that evaluates task execution results and provides feedback with user-friendly messages.
-
-TASK OBJECTIVE: {context.task_step.objective if context.task_step else 'No task objective specified'}
-SUCCESS CRITERIA: {', '.join(context.task_step.success_criteria) if context.task_step and context.task_step.success_criteria else 'No success criteria specified'}
-
-EXECUTION SUMMARY:
-{action_summary}
-
-RESULT SUMMARY:
-{result_summary}
-
-{warnings_section}
-
-DOCUMENT VALIDATION SUMMARY:
-{document_validation_summary if document_validation_summary else "No documents to validate"}
-
-DOCUMENT CONTEXT (Available Documents):
-{document_context}
-
-PREVIOUS RESULTS: {', '.join(context.previous_results) if context.previous_results else 'None'}
-
-REVIEW INSTRUCTIONS:
-1. Evaluate if the task step was completed successfully
-2. Check if all success criteria were met
-3. Assess the quality and completeness of outputs
-4. Identify any missing or incomplete results
-5. Provide specific improvement suggestions
-6. Generate user-friendly messages explaining the results
-7. Return a JSON object with the exact structure shown below
-
-DOCUMENT VALIDATION FOCUS:
-- Check if the agreed result documents label is correct (matches expected format)
-- Verify that documents are actually present and have basic properties
-- Do NOT attempt to analyze document content deeply
-- Focus on document existence and basic metadata validation
-
-REQUIRED JSON STRUCTURE:
-{{
- "status": "success|retry|failed",
- "reason": "Brief explanation of the status",
- "improvements": ["improvement1", "improvement2"],
- "quality_score": 8, // 1-10 scale
- "missing_outputs": ["missing_output1", "missing_output2"],
- "met_criteria": ["criteria1", "criteria2"],
- "unmet_criteria": ["criteria3", "criteria4"],
- "confidence": 0.85, // 0.0-1.0 confidence level in this assessment
- "userMessage": "User-friendly message explaining the review results in the user's language"
-}}
-
-FIELD REQUIREMENTS:
-- "status": Overall task completion status
- - "success": All criteria met, high-quality outputs
- - "retry": Some criteria met, outputs need improvement and retry
- - "failed": Most criteria unmet, significant issues
-- "reason": Clear explanation of why this status was assigned
-- "improvements": List of specific, actionable improvements
-- "quality_score": 1-10 rating of output quality
-- "missing_outputs": List of expected outputs that were not produced
-- "met_criteria": List of success criteria that were fully met
-- "unmet_criteria": List of success criteria that were not met
-- "confidence": 0.0-1.0 confidence level in this assessment
-- "userMessage": User-friendly explanation of results in the user's language
-
-EXAMPLES OF GOOD IMPROVEMENTS:
-- "Increase AI prompt specificity for better data extraction"
-- "Add validation steps to ensure output completeness"
-- "Improve error handling for failed document processing"
-- "Enhance document format specifications for better output quality"
-
-EXAMPLES OF GOOD MISSING OUTPUTS:
-- "Structured analysis report in JSON format"
-- "Comparison matrix of candidate profiles"
-- "Data validation summary with quality metrics"
-- "Professional business communication document"
-
-QUALITY SCORE GUIDELINES:
-- 9-10: Exceptional quality, exceeds expectations
-- 7-8: Good quality, meets all requirements
-- 5-6: Acceptable quality, minor issues
-- 3-4: Poor quality, significant issues
-- 1-2: Very poor quality, major problems
-
-USER LANGUAGE: {user_language} - All user messages must be generated in this language.
-
-NOTE: Respond with ONLY the JSON object. Do not include any explanatory text."""
-
- return prompt
-
-# ===== New compact prompts for React-style workflow =====
-
-def _build_tiny_catalog(service) -> str:
- """Return minimal tool catalog: method -> { action -> [paramNames] }"""
- try:
- method_signatures = getMethodsList(service)
- except Exception:
- method_signatures = []
- catalog: Dict[str, Dict[str, List[str]]] = {}
- for sig in method_signatures:
- if '.' not in sig or '(' not in sig or ')' not in sig:
- continue
- method, rest = sig.split('.', 1)
- action = rest.split('(')[0]
- params_str = rest[rest.find('(')+1:rest.find(')')].strip()
- param_names = []
- if params_str:
- for p in params_str.split(','):
- name = p.strip().split(':')[0].split('=')[0].strip()
- if name:
- param_names.append(name)
- catalog.setdefault(method, {})[action] = param_names
- return json.dumps(catalog, separators=(',', ':'), ensure_ascii=False)
-
-def createActionSelectionPrompt(context: TaskContext, service) -> str:
- """Prompt that returns exactly one action selection: {"action":{"method":"..","name":".."}}"""
- user_language = service.user.language if service and service.user else 'en'
- tiny_catalog = _build_tiny_catalog(service)
- objective = context.task_step.objective if context and context.task_step else ''
- available_docs = _getAvailableDocuments(context.workflow) if context and context.workflow else "No documents available"
- return f"""Select exactly one action to advance the task.
-
-OBJECTIVE: {objective}
-AVAILABLE DOCUMENTS: {available_docs}
-USER LANGUAGE: {user_language}
-
-MINIMAL TOOL CATALOG (method -> action -> [parameterNames]):
-{tiny_catalog}
-
-BUSINESS RULES:
-- Pick exactly one action per step.
-- Derive choice from objective and success criteria.
-- Prefer user language.
-- Keep it minimal; avoid provider specifics.
-
-RESPONSE FORMAT (JSON only):
-{{"action":{{"method":"web","name":"search"}}}}
-"""
-
-def createActionParameterPrompt(context: TaskContext, selected_action: Dict[str, str], service=None) -> str:
- """Prompt that returns only parameters for the selected action: {"parameters":{...}}"""
- user_language = service.user.language if service and service.user else 'en'
- method = selected_action.get('method', '') if selected_action else ''
- name = selected_action.get('name', '') if selected_action else ''
- available_docs = _getAvailableDocuments(context.workflow) if context and context.workflow else "No documents available"
-
- # Get action signature from service center
- action_signature = ""
- if service and method in methods:
- method_instance = methods[method]['instance']
- action_signature = method_instance.getActionSignature(name)
-
- return f"""Provide only the required parameters for this action.
-
-SELECTED ACTION: {method}.{name}
-ACTION SIGNATURE: {action_signature}
-OBJECTIVE: {context.task_step.objective if context and context.task_step else ''}
-AVAILABLE DOCUMENTS: {available_docs}
-USER LANGUAGE: {user_language}
-
-RULES:
-- Return only the parameters object.
-- Include user language if relevant.
-- Reference documents only by exact labels available.
-- Avoid unnecessary fields; host applies defaults.
-- Use the ACTION SIGNATURE above to understand what parameters are required.
-- Convert the objective into appropriate parameter values as needed.
-
-RESPONSE FORMAT (JSON only):
-{{"parameters":{{}}}}
-"""
-
-def createRefinementPrompt(context: TaskContext, observation: Dict[str, Any]) -> str:
- """Prompt that decides to continue or stop based on observation: {"decision":"continue|stop","reason":".."} """
- user_language = context.workflow.messages[-1].role if False else (getattr(context.workflow, 'user_language', None) or (getattr(context.workflow, 'language', None))) # not used, keep minimal
- objective = context.task_step.objective if context and context.task_step else ''
- return f"""Decide next step based on observation.
-
-OBJECTIVE: {objective}
-OBSERVATION:
-{json.dumps(observation, ensure_ascii=False)}
-
-RULES:
-- If criteria are met or no further action helps, decide stop.
-- Else decide continue.
-
-RESPONSE FORMAT (JSON only):
-{{"decision":"continue","reason":"Need more data"}}
-"""
\ No newline at end of file
diff --git a/modules/workflows/processing/shared/__init__.py b/modules/workflows/processing/shared/__init__.py
new file mode 100644
index 00000000..2f6387a6
--- /dev/null
+++ b/modules/workflows/processing/shared/__init__.py
@@ -0,0 +1 @@
+# Shared workflow utilities
diff --git a/modules/workflows/processing/executionState.py b/modules/workflows/processing/shared/executionState.py
similarity index 100%
rename from modules/workflows/processing/executionState.py
rename to modules/workflows/processing/shared/executionState.py
diff --git a/modules/workflows/processing/shared/promptFactory.py b/modules/workflows/processing/shared/promptFactory.py
new file mode 100644
index 00000000..bb177a19
--- /dev/null
+++ b/modules/workflows/processing/shared/promptFactory.py
@@ -0,0 +1,321 @@
+# promptFactory.py
+# Enhanced prompt factory with reusable functions
+
+import json
+import logging
+import importlib
+import pkgutil
+import inspect
+from typing import Any, Dict, List
+from modules.datamodels.datamodelWorkflow import TaskContext, ReviewContext, DocumentExchange
+from modules.datamodels.datamodelChat import ChatDocument
+from modules.services.serviceGeneration.subDocumentUtility import getFileExtension
+from modules.workflows.methods.methodBase import MethodBase
+
+# Set up logger
+logger = logging.getLogger(__name__)
+
# Global methods catalog - moved from serviceCenter
# NOTE: discoverMethods registers each method under BOTH its class name
# (e.g. "DocumentMethod") and a short alias (e.g. "document"); both keys
# point at the SAME info dict, so consumers must not treat them as two methods.
methods = {}

def discoverMethods(serviceCenter):
    """Dynamically discover all method classes and their actions in modules methods package.

    Scans modules.workflows.methods for modules named ``method*``, instantiates
    every class derived from MethodBase, and registers it in the global
    ``methods`` catalog under its class name and a lowercase short alias.

    Args:
        serviceCenter: Service container passed to each method's constructor.
    """
    try:
        # Import the methods package
        methodsPackage = importlib.import_module('modules.workflows.methods')

        # Discover all modules in the package
        for _, name, isPkg in pkgutil.iter_modules(methodsPackage.__path__):
            if not isPkg and name.startswith('method'):
                try:
                    # Import the module
                    module = importlib.import_module(f'modules.workflows.methods.{name}')

                    # Find all classes in the module that inherit from MethodBase
                    for itemName, item in inspect.getmembers(module):
                        if (inspect.isclass(item) and
                            issubclass(item, MethodBase) and
                            item != MethodBase):
                            # Instantiate the method
                            methodInstance = item(serviceCenter)

                            # MethodBase.actions resolves @action-decorated handlers
                            actions = methodInstance.actions

                            methodInfo = {
                                'instance': methodInstance,
                                'actions': actions,
                                'description': item.__doc__ or f"Method {itemName}"
                            }

                            # Register under the full class name AND a short alias
                            # used by the action executor; both share one dict.
                            methods[itemName] = methodInfo
                            shortName = itemName.replace('Method', '').lower()
                            methods[shortName] = methodInfo

                            logger.info(f"Discovered method {itemName} (short: {shortName}) with {len(actions)} actions")

                except Exception as e:
                    logger.error(f"Error discovering method {name}: {str(e)}")
                    continue

        logger.info(f"Discovered {len(methods)} method entries total")

    except Exception as e:
        logger.error(f"Error discovering methods: {str(e)}")

def getMethodsList(serviceCenter):
    """Get a list of available methods with their signatures.

    Fix: the catalog stores every method under two keys (class name and short
    alias) that reference the same info dict, so naive iteration listed each
    method twice in the prompt catalog. Deduplicate by info-dict identity and
    display the shortest name, which is the executor-facing alias the action
    prompts expect (e.g. "document").
    """
    if not methods:
        discoverMethods(serviceCenter)

    # Map id(info dict) -> (display name, info) so aliased entries collapse.
    uniqueMethods = {}
    for methodName, methodInfo in methods.items():
        key = id(methodInfo)
        if key not in uniqueMethods or len(methodName) < len(uniqueMethods[key][0]):
            uniqueMethods[key] = (methodName, methodInfo)

    methodsList = []
    for methodName, methodInfo in uniqueMethods.values():
        methodDescription = methodInfo['description']
        actionsList = []

        for actionName, actionInfo in methodInfo['actions'].items():
            actionDescription = actionInfo['description']
            parameters = actionInfo['parameters']

            # Build parameter signature, e.g. "(query: str, limit: int = 5)"
            paramSig = []
            for paramName, paramInfo in parameters.items():
                paramType = paramInfo['type']
                if paramInfo['required']:
                    paramSig.append(f"{paramName}: {paramType}")
                else:
                    paramDefault = paramInfo['default']
                    defaultStr = f" = {paramDefault}" if paramDefault is not None else " = None"
                    paramSig.append(f"{paramName}: {paramType}{defaultStr}")

            paramSignature = f"({', '.join(paramSig)})" if paramSig else "()"
            actionsList.append(f"- {actionName}{paramSignature}: {actionDescription}")

        actionsStr = "\n".join(actionsList)
        methodsList.append(f"**{methodName}**: {methodDescription}\n{actionsStr}")

    return "\n\n".join(methodsList)
+
+# Reusable prompt element functions
def getAvailableDocuments(context: Any) -> str:
    """Render the context's available documents as a numbered list for prompts.

    Returns "No documents available" when the context carries no usable list,
    and "Error retrieving documents" if rendering fails unexpectedly.
    """
    try:
        if not context:
            return "No documents available"
        documents = getattr(context, 'available_documents', None)
        if not documents or not isinstance(documents, list):
            return "No documents available"

        entries = []
        for idx, doc in enumerate(documents, 1):
            if isinstance(doc, ChatDocument):
                entry = f"{idx}. **{doc.fileName}**"
                mime = getattr(doc, 'mimeType', None)
                if mime:
                    entry += f" ({mime})"
                size = getattr(doc, 'size', None)
                if size:
                    entry += f" - {size} bytes"
                entries.append(entry)
            elif isinstance(doc, dict):
                entry = f"{idx}. **{doc.get('fileName', 'Unknown')}**"
                if doc.get('mimeType'):
                    entry += f" ({doc['mimeType']})"
                if doc.get('size'):
                    entry += f" - {doc['size']} bytes"
                entries.append(entry)
            else:
                # Unknown document shape: fall back to its string form.
                entries.append(f"{idx}. {str(doc)}")

        return "\n".join(entries) if entries else "No documents available"
    except Exception as e:
        logger.error(f"Error getting available documents: {str(e)}")
        return "Error retrieving documents"
+
def getWorkflowHistory(services, context: Any) -> str:
    """Summarize the last 10 workflow messages for prompt context.

    Messages longer than 200 characters are truncated with an ellipsis.
    Returns "No workflow history available" when nothing can be summarized.
    """
    try:
        if not context:
            return "No workflow history available"
        workflowId = getattr(context, 'workflow_id', None)
        if not workflowId:
            return "No workflow history available"

        messages = services.interfaceDbChat.getWorkflowMessages(workflowId)
        if not messages:
            return "No workflow history available"

        entries = []
        # Only the most recent 10 messages are relevant for prompt context.
        for msg in messages[-10:]:
            if hasattr(msg, 'role') and hasattr(msg, 'message'):
                speaker = "User" if msg.role == "user" else "Assistant"
                text = msg.message
                if len(text) > 200:
                    text = text[:200] + "..."
                entries.append(f"**{speaker}**: {text}")

        return "\n".join(entries) if entries else "No workflow history available"
    except Exception as e:
        logger.error(f"Error getting workflow history: {str(e)}")
        return "Error retrieving workflow history"
+
def getAvailableMethods(services) -> str:
    """Return the formatted catalog of available methods for prompt context.

    Populates the global method catalog lazily on first use; on any failure
    returns a stable error string rather than raising.
    """
    try:
        if not methods:
            # Lazy discovery: the catalog is filled once per process.
            discoverMethods(services)
        return getMethodsList(services)
    except Exception as e:
        logger.error(f"Error getting available methods: {str(e)}")
        return "Error retrieving available methods"
+
def getEnhancedDocumentContext(services) -> str:
    """Render all documents of the current workflow with full metadata.

    Includes MIME type, size, and created/modified timestamps when present.
    Returns explanatory fallback strings when no workflow or documents exist.
    """
    try:
        workflow = getattr(services, 'workflow', None)
        if not workflow or not hasattr(workflow, 'id'):
            return "No workflow context available"

        documents = services.interfaceDbChat.getWorkflowDocuments(workflow.id)
        if not documents:
            return "No documents available"

        entries = []
        for idx, doc in enumerate(documents, 1):
            if isinstance(doc, ChatDocument):
                entry = f"{idx}. **{doc.fileName}**"
                if getattr(doc, 'mimeType', None):
                    entry += f" ({doc.mimeType})"
                if getattr(doc, 'size', None):
                    entry += f" - {doc.size} bytes"
                if getattr(doc, 'created', None):
                    entry += f" - Created: {doc.created}"
                if getattr(doc, 'modified', None):
                    entry += f" - Modified: {doc.modified}"
                entries.append(entry)
            elif isinstance(doc, dict):
                entry = f"{idx}. **{doc.get('fileName', 'Unknown')}**"
                # Append each optional metadata field in a fixed order.
                for key, template in (('mimeType', " ({})"),
                                      ('size', " - {} bytes"),
                                      ('created', " - Created: {}"),
                                      ('modified', " - Modified: {}")):
                    value = doc.get(key)
                    if value:
                        entry += template.format(value)
                entries.append(entry)
            else:
                entries.append(f"{idx}. {str(doc)}")

        return "\n".join(entries) if entries else "No documents available"
    except Exception as e:
        logger.error(f"Error getting enhanced document context: {str(e)}")
        return "Error retrieving document context"
+
def getConnectionReferenceList(services) -> List[str]:
    """List configured connections as "name (type)" strings.

    Returns an empty list when no connection service is attached or on error.
    """
    try:
        connection = getattr(services, 'connection', None)
        if connection is None or not hasattr(connection, 'getConnections'):
            return []
        connections = connection.getConnections()
        if not connections:
            return []
        return [f"{conn.get('name', 'Unknown')} ({conn.get('type', 'Unknown')})"
                for conn in connections]
    except Exception as e:
        logger.error(f"Error getting connection reference list: {str(e)}")
        return []
+
def getUserLanguage(services) -> str:
    """Return the user's language code from services, defaulting to 'en'."""
    try:
        user = getattr(services, 'user', None)
        if user is not None and hasattr(user, 'language'):
            # An unset (falsy) language still falls back to English.
            return user.language or 'en'
        return 'en'
    except Exception as e:
        logger.error(f"Error getting user language: {str(e)}")
        return 'en'
+
def getReviewContent(context: Any) -> str:
    """Format an action observation (status, document count, previews, notes)
    into a plain-text summary for the review prompt.

    Returns "No review content available" when the context carries no
    observation dict.
    """
    try:
        if not context:
            return "No review content available"
        observation = getattr(context, 'observation', None)
        if not isinstance(observation, dict):
            return "No review content available"

        lines = []

        if 'success' in observation:
            lines.append(f"Success: {observation['success']}")

        if 'documentsCount' in observation:
            lines.append(f"Documents generated: {observation['documentsCount']}")

        previews = observation.get('previews')
        if previews:
            lines.append("Document previews:")
            for preview in previews:
                if isinstance(preview, dict):
                    name = preview.get('name', 'Unknown')
                    mimeType = preview.get('mimeType', 'Unknown')
                    size = preview.get('contentSize', 'Unknown size')
                    lines.append(f"  - {name} ({mimeType}) - {size}")

        notes = observation.get('notes')
        if notes:
            lines.append("Notes:")
            lines.extend(f"  - {note}" for note in notes)

        return "\n".join(lines) if lines else "No review content available"
    except Exception as e:
        logger.error(f"Error getting review content: {str(e)}")
        return "Error retrieving review content"
+
def getPreviousRoundContext(services, context: Any) -> str:
    """Summarize previous round results as a numbered status list for prompts.

    Each entry renders as "<n>. <label> - Success|Failed"; results of unknown
    shape fall back to their string form.
    """
    try:
        if not context or not getattr(context, 'workflow_id', None):
            return "No previous round context available"

        previousResults = getattr(context, 'previous_results', [])
        if not previousResults:
            return "No previous round context available"

        lines = []
        for idx, result in enumerate(previousResults, 1):
            if hasattr(result, 'success') and hasattr(result, 'resultLabel'):
                status = "Success" if result.success else "Failed"
                lines.append(f"{idx}. {result.resultLabel} - {status}")
            elif isinstance(result, dict):
                status = "Success" if result.get('success', False) else "Failed"
                lines.append(f"{idx}. {result.get('resultLabel', 'Unknown')} - {status}")
            else:
                lines.append(f"{idx}. {str(result)}")

        return "\n".join(lines) if lines else "No previous round context available"
    except Exception as e:
        logger.error(f"Error getting previous round context: {str(e)}")
        return "Error retrieving previous round context"
diff --git a/modules/workflows/processing/promptFactoryPlaceholders.py b/modules/workflows/processing/shared/promptFactoryPlaceholders.py
similarity index 62%
rename from modules/workflows/processing/promptFactoryPlaceholders.py
rename to modules/workflows/processing/shared/promptFactoryPlaceholders.py
index 050eec09..9c3aea07 100644
--- a/modules/workflows/processing/promptFactoryPlaceholders.py
+++ b/modules/workflows/processing/shared/promptFactoryPlaceholders.py
@@ -4,20 +4,24 @@ This module provides prompt templates with placeholders that can be filled dynam
"""
import json
+import logging
from typing import Dict, Any
-from modules.workflows.processing.promptFactory import (
- _getAvailableDocuments,
- _getPreviousRoundContext,
+
+logger = logging.getLogger(__name__)
+from modules.workflows.processing.shared.promptFactory import (
+ getAvailableDocuments,
+ getPreviousRoundContext,
getMethodsList,
getEnhancedDocumentContext,
- _getConnectionReferenceList,
- methods
+ getConnectionReferenceList,
+ methods,
+ discoverMethods
)
def createTaskPlanningPromptTemplate() -> str:
"""Create task planning prompt template with placeholders."""
- return """You are a task planning AI that breaks down user requests into logical, executable task steps.
+ return """Break down user requests into logical, executable task steps.
USER REQUEST:
{{KEY:USER_PROMPT}}
@@ -29,9 +33,11 @@ PREVIOUS WORKFLOW ROUNDS:
{{KEY:WORKFLOW_HISTORY}}
TASK PLANNING RULES:
-- COMBINE related activities into single tasks to avoid fragmentation
-- Focus on business value and meaningful outcomes
-- Keep tasks at appropriate abstraction level (not implementation details)
+- Focus on DELIVERING what the user asked for, not how to do it
+- For DATA requests (numbers, lists, calculations): Plan to deliver the actual data
+- For DOCUMENT requests (Word, PDF, Excel): Plan to create the formatted document
+- For ANALYSIS requests: Plan to analyze and deliver insights
+- Keep tasks simple and focused on outcomes, not implementation details
- Each task should produce usable results for subsequent tasks
- If retry request, analyze previous rounds to understand what failed
@@ -43,7 +49,7 @@ REQUIRED JSON STRUCTURE:
"tasks": [
{{
"id": "task_1",
- "objective": "Clear business objective combining related activities",
+ "objective": "Clear business objective focusing on what to deliver",
"dependencies": ["task_0"],
"success_criteria": ["measurable criteria 1", "measurable criteria 2"],
"estimated_complexity": "low|medium|high",
@@ -57,7 +63,7 @@ RESPONSE: Return ONLY the JSON object."""
def createActionDefinitionPromptTemplate() -> str:
"""Create action definition prompt template with placeholders."""
- return """You are an action planning AI that generates specific, executable actions for task steps.
+ return """Generate the next action to advance toward completing the task objective.
TASK OBJECTIVE: {{KEY:USER_PROMPT}}
@@ -69,26 +75,13 @@ AVAILABLE METHODS: {{KEY:AVAILABLE_METHODS}}
USER LANGUAGE: {{KEY:USER_LANGUAGE}}
-ACTION SELECTION RULES:
-- Use document.generateReport for creating formatted documents (Word, PDF, Excel, etc.)
-- Use ai.process for text analysis, Q&A, research, brainstorming (plain text only)
-- Use web.search for external information gathering
-- Use document.extract for analyzing existing documents
-- If no documents available, use web actions or create status reports
-
-PARAMETER REQUIREMENTS:
-- documentList must be a LIST of references from AVAILABLE DOCUMENTS
-- Use specific, detailed prompts for document actions
-- Include all necessary parameters for execution
-- Reference previous action outputs using: "round{current_round}_task{current_task}_action{action_number}_{label}"
-
-REQUIRED JSON STRUCTURE:
+REQUIRED JSON STRUCTURE FOR YOUR RESPONSE:
{{
"actions": [
{{
"method": "method_name",
"action": "action_name",
- "parameters": {{}},
+ "parameters": {},
"resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
"description": "What this action accomplishes",
"userMessage": "User-friendly message in {{KEY:USER_LANGUAGE}}"
@@ -107,22 +100,43 @@ OBJECTIVE: {{KEY:USER_PROMPT}}
AVAILABLE DOCUMENTS: {{KEY:AVAILABLE_DOCUMENTS}}
USER LANGUAGE: {{KEY:USER_LANGUAGE}}
-MINIMAL TOOL CATALOG (method -> action -> [parameterNames]):
+AVAILABLE METHODS:
{{KEY:AVAILABLE_METHODS}}
-BUSINESS RULES:
-- Pick exactly one action per step.
-- Derive choice from objective and success criteria.
-- Prefer user language.
-- Keep it minimal; avoid provider specifics.
+CRITICAL: Return ONLY the method and action name. Do NOT include parameters or prompts.
-RESPONSE FORMAT (JSON only):
-{{"action":{{"method":"web","name":"search"}}}}"""
+REQUIRED JSON FORMAT:
+{"action":{"method":"method_name","name":"action_name"}}
+
+EXAMPLES:
+{"action":{"method":"ai","name":"process"}}
+{"action":{"method":"document","name":"generate"}}
+{"action":{"method":"web","name":"search"}}"""
def createActionParameterPromptTemplate() -> str:
"""Create action parameter prompt template with placeholders."""
- return """Provide only the required parameters for this action.
+ return """CRITICAL: You MUST wrap all parameters in a "parameters" object!
+
+MANDATORY RESPONSE FORMAT:
+{"parameters":{"parameterName": "parameterValue"}}
+
+EXAMPLES:
+For aiPrompt parameter: {"parameters":{"aiPrompt": "Your prompt here"}}
+For multiple parameters: {"parameters":{"aiPrompt": "Your prompt here", "language": "en"}}
+
+WRONG FORMAT (DO NOT USE):
+{"aiPrompt": "Your prompt here"}
+```json
+{"aiPrompt": "Your prompt here"}
+```
+
+CORRECT FORMAT (MUST USE):
+{"parameters":{"aiPrompt": "Your prompt here"}}
+
+DO NOT use code blocks or markdown. Return ONLY the JSON object with parameters wrapped in "parameters".
+
+Provide only the required parameters for this action.
SELECTED ACTION: {{KEY:SELECTED_ACTION}}
@@ -150,14 +164,33 @@ CRITICAL RULES:
- For documentList parameters: Use docList references when you need multiple documents
- For documentList parameters: Use docItem references when you need specific documents
- For connectionReference parameters: Use the exact connection reference from AVAILABLE CONNECTIONS
-- Return only the parameters object as JSON
- Include user language if relevant
- Avoid unnecessary fields; host applies defaults
- Use the ACTION SIGNATURE above to understand what parameters are required
- Convert the objective into appropriate parameter values as needed
-RESPONSE FORMAT (JSON only):
-{{"parameters":{{}}}}"""
+CRITICAL: You MUST wrap all parameters in a "parameters" object!
+
+MANDATORY RESPONSE FORMAT:
+{"parameters":{"parameterName": "parameterValue"}}
+
+EXAMPLES:
+For aiPrompt parameter:
+{"parameters":{"aiPrompt": "Your prompt here"}}
+
+For multiple parameters:
+{"parameters":{"aiPrompt": "Your prompt here", "language": "en"}}
+
+WRONG FORMAT (DO NOT USE):
+{"aiPrompt": "Your prompt here"}
+```json
+{"aiPrompt": "Your prompt here"}
+```
+
+CORRECT FORMAT (MUST USE):
+{"parameters":{"aiPrompt": "Your prompt here"}}
+
+DO NOT use code blocks or markdown. Return ONLY the JSON object with parameters wrapped in "parameters"."""
def createRefinementPromptTemplate() -> str:
@@ -168,17 +201,25 @@ OBJECTIVE: {{KEY:USER_PROMPT}}
OBSERVATION:
{{KEY:REVIEW_CONTENT}}
-RULES:
-- If criteria are met or no further action helps, decide stop.
-- Else decide continue.
+CRITICAL RULES:
+- If user wants DATA (numbers, lists, calculations): Ensure AI delivers the actual data, not code
+- If user wants DOCUMENTS (Word, PDF, Excel): Ensure appropriate method is used to create the document
+- If user wants ANALYSIS: Ensure AI analyzes and delivers insights
+- NEVER accept code when user wants data - demand the actual data
+- NEVER accept algorithms when user wants results - demand the actual results
+
+DECISION RULES:
+- If the objective is fulfilled (user got what they asked for), decide stop
+- If the objective is not fulfilled (user didn't get what they asked for), decide continue
+- Focus on what the user actually wants, not what was delivered
RESPONSE FORMAT (JSON only):
-{{"decision":"continue","reason":"Need more data"}}"""
+{"decision":"continue","reason":"Need more data"}"""
def createResultReviewPromptTemplate() -> str:
"""Create result review prompt template with placeholders."""
- return """You are a result validation AI that reviews task execution outcomes and determines success, retry needs, or failure.
+ return """Review task execution outcomes and determine success, retry needs, or failure.
TASK OBJECTIVE: {{KEY:USER_PROMPT}}
@@ -229,63 +270,40 @@ def extractUserPrompt(context) -> str:
def extractAvailableDocuments(context) -> str:
    """Extract available documents from context."""
    if hasattr(context, 'workflow') and context.workflow:
-        return _getAvailableDocuments(context.workflow)
+        # Renamed helper: now the public shared.promptFactory API.
+        return getAvailableDocuments(context.workflow)
    return "No documents available"
def extractWorkflowHistory(service, context) -> str:
    """Extract workflow history from context."""
    if hasattr(context, 'workflow') and context.workflow:
-        return _getPreviousRoundContext(service, context.workflow) or "No previous workflow rounds - this is the first round."
+        # NOTE(review): getPreviousRoundContext always returns a non-empty
+        # string, so the 'or' fallback below can never trigger — harmless,
+        # but effectively dead code.
+        return getPreviousRoundContext(service, context.workflow) or "No previous workflow rounds - this is the first round."
    return "No previous workflow rounds - this is the first round."
def extractAvailableMethods(service) -> str:
    """Extract available methods for action planning.

    Returns a JSON string mapping short method names (e.g. 'ai', 'document')
    to {actionName: description} dicts, or '{}' on error.
    """
-    methodList = getMethodsList(service)
-    method_actions = {}
-    for sig in methodList:
-        if '.' in sig:
-            method, rest = sig.split('.', 1)
-            action = rest.split('(')[0]
-            method_actions.setdefault(method, []).append((action, sig))
-
-    # Create a structured JSON format for better AI parsing
-    available_methods_json = {}
-    for method, actions in method_actions.items():
-        available_methods_json[method] = {}
-        # Get the method instance for accessing docstrings
-        method_instance = methods.get(method, {}).get('instance') if methods else None
+    try:
+        # Get the methods dictionary directly from the global methods variable
+        if not methods:
+            discoverMethods(service)
+            # NOTE(review): assumes discoverMethods() populates the imported
+            # module-global 'methods' in place; if it instead returns a new
+            # dict, the loop below still sees an empty mapping — TODO confirm.
-        for action, sig in actions:
-            # Get the main action description (not parameters) for Step 1 action selection
-            action_description = ""
+        # Create a structured JSON format for better AI parsing
+        available_methods_json = {}
+        for methodName, methodInfo in methods.items():
+            # Convert MethodAi -> ai, MethodDocument -> document, etc.
+            shortName = methodName.replace('Method', '').lower()
+            available_methods_json[shortName] = {}
-            # Get the actual function's docstring
-            if method_instance and hasattr(method_instance, action):
-                func = getattr(method_instance, action)
-                if hasattr(func, '__doc__') and func.__doc__:
-                    docstring = func.__doc__
-
-                    # Extract main description (everything before "Parameters:")
-                    lines = docstring.split('\n')
-                    description_lines = []
-                    for line in lines:
-                        line = line.strip()
-                        if line.startswith('Parameters:'):
-                            break
-                        if line and not line.startswith('@'):
-                            description_lines.append(line)
-
-                    action_description = ' '.join(description_lines).strip()
-
-            # If no description found, create a basic one
-            if not action_description:
-                action_description = f"Execute {method}.{action} action"
-
-            available_methods_json[method][action] = action_description
-
-        return json.dumps(available_methods_json, indent=2, ensure_ascii=False)
+            for actionName, actionInfo in methodInfo['actions'].items():
+                # Get the action description (fallback to a generic one)
+                action_description = actionInfo.get('description', f"Execute {actionName} action")
+                available_methods_json[shortName][actionName] = action_description
+
+        return json.dumps(available_methods_json, indent=2, ensure_ascii=False)
+    except Exception as e:
+        logger.error(f"Error extracting available methods: {str(e)}")
+        # Degrade to an empty catalog rather than failing prompt assembly.
+        return json.dumps({}, indent=2, ensure_ascii=False)
def extractUserLanguage(service) -> str:
@@ -341,5 +359,22 @@ def extractReviewContent(context) -> str:
return json.dumps(obs_copy, indent=2, ensure_ascii=False)
else:
return json.dumps(context.observation, ensure_ascii=False)
+ elif hasattr(context, 'step_result') and context.step_result and 'observation' in context.step_result:
+ # For observation data in step_result, show full content but handle documents specially
+ observation = context.step_result['observation']
+ if isinstance(observation, dict):
+ # Create a copy to modify
+ obs_copy = observation.copy()
+
+ # If there are previews with documents, show only metadata
+ if 'previews' in obs_copy and isinstance(obs_copy['previews'], list):
+ for preview in obs_copy['previews']:
+ if isinstance(preview, dict) and 'snippet' in preview:
+ # Replace snippet with metadata indicator
+ preview['snippet'] = f"[Content: {len(preview.get('snippet', ''))} characters]"
+
+ return json.dumps(obs_copy, indent=2, ensure_ascii=False)
+ else:
+ return json.dumps(observation, ensure_ascii=False)
else:
return "No review content available"
diff --git a/modules/workflows/processing/workflowProcessor.py b/modules/workflows/processing/workflowProcessor.py
new file mode 100644
index 00000000..9031ac30
--- /dev/null
+++ b/modules/workflows/processing/workflowProcessor.py
@@ -0,0 +1,335 @@
+# workflowProcessor.py
+# Main workflow processor with delegation pattern
+
+import logging
+from typing import Dict, Any, Optional, List
+from modules.datamodels.datamodelWorkflow import TaskStep, TaskContext, TaskPlan, TaskResult, ReviewResult
+from modules.datamodels.datamodelChat import ChatWorkflow
+from modules.workflows.processing.modes.baseMode import BaseMode
+from modules.workflows.processing.modes.actionplanMode import ActionplanMode
+from modules.workflows.processing.modes.reactMode import ReactMode
+
+logger = logging.getLogger(__name__)
+
+class WorkflowStoppedException(Exception):
+    """Raised when a workflow is stopped by the user.
+
+    Callers (e.g. WorkflowManager) catch this to abort processing gracefully.
+    """
+    pass
+
+class WorkflowProcessor:
+    """Main workflow processor that delegates to mode implementations.
+
+    Selects ReactMode or ActionplanMode from workflow.workflowMode and forwards
+    planning/execution calls to it, while owning progress bookkeeping
+    (task/action counters persisted via interfaceDbChat) and debug trace logging.
+    """
+
+    def __init__(self, services, workflow=None):
+        self.services = services
+        self.workflow = workflow
+        # Default to Actionplan when no workflow (and hence no mode) is given.
+        self.mode = self._createMode(workflow.workflowMode if workflow else "Actionplan")
+
+    def _createMode(self, workflowMode: str) -> BaseMode:
+        """Create the appropriate mode implementation based on workflow mode"""
+        if workflowMode == "React":
+            return ReactMode(self.services, self.workflow)
+        else:
+            return ActionplanMode(self.services, self.workflow)
+
+    def _checkWorkflowStopped(self, workflow):
+        """Raise WorkflowStoppedException if the user has stopped the workflow.
+
+        Reads the current status from the database to avoid acting on stale
+        in-memory data; falls back to the in-memory object only when the
+        database lookup itself fails.
+        """
+        try:
+            # Get the current workflow status from the database to avoid stale data
+            current_workflow = self.services.interfaceDbChat.getWorkflow(workflow.id)
+            if current_workflow and current_workflow.status == "stopped":
+                logger.info("Workflow stopped by user, aborting processing")
+                raise WorkflowStoppedException("Workflow was stopped by user")
+        except WorkflowStoppedException:
+            # BUGFIX: the stop signal raised above must not be swallowed by the
+            # generic database-error fallback below — re-raise it unconditionally.
+            raise
+        except Exception as e:
+            # If we can't get the current status due to other database issues, fall back to the in-memory object
+            logger.warning(f"Could not check current workflow status from database: {str(e)}")
+            if workflow and workflow.status == "stopped":
+                logger.info("Workflow stopped by user (from in-memory object), aborting processing")
+                raise WorkflowStoppedException("Workflow was stopped by user")
+
+    async def generateTaskPlan(self, userInput: str, workflow: ChatWorkflow) -> TaskPlan:
+        """Generate a high-level task plan for the workflow"""
+        try:
+            # Check workflow status before generating task plan
+            self._checkWorkflowStopped(workflow)
+
+            logger.info(f"=== STARTING TASK PLAN GENERATION ===")
+            logger.info(f"Workflow ID: {workflow.id}")
+            logger.info(f"User Input: {userInput}")
+            logger.info(f"Workflow Mode: {workflow.workflowMode}")
+
+            # Delegate to the appropriate mode
+            taskPlan = await self.mode.generateTaskPlan(userInput, workflow)
+
+            # Create task plan message
+            await self.mode.createTaskPlanMessage(taskPlan, workflow)
+
+            return taskPlan
+        except Exception as e:
+            logger.error(f"Error in generateTaskPlan: {str(e)}")
+            raise
+
+    async def executeTask(self, taskStep: TaskStep, workflow: ChatWorkflow, context: TaskContext,
+                          taskIndex: int = None, totalTasks: int = None) -> TaskResult:
+        """Execute a task step using the appropriate mode"""
+        try:
+            # Check workflow status before executing task
+            self._checkWorkflowStopped(workflow)
+
+            logger.info(f"=== STARTING TASK EXECUTION ===")
+            logger.info(f"Task: {taskStep.objective}")
+            logger.info(f"Mode: {workflow.workflowMode}")
+
+            # Delegate to the appropriate mode
+            return await self.mode.executeTask(taskStep, workflow, context, taskIndex, totalTasks)
+        except Exception as e:
+            logger.error(f"Error in executeTask: {str(e)}")
+            raise
+
+    async def generateTaskActions(self, taskStep: TaskStep, workflow: ChatWorkflow,
+                                  previousResults: List = None, enhancedContext: TaskContext = None) -> List:
+        """Generate actions for a task step using the appropriate mode"""
+        try:
+            # Check workflow status before generating actions
+            self._checkWorkflowStopped(workflow)
+
+            logger.info(f"=== STARTING ACTION GENERATION ===")
+            logger.info(f"Task: {taskStep.objective}")
+            logger.info(f"Mode: {workflow.workflowMode}")
+
+            # Delegate to the appropriate mode
+            return await self.mode.generateTaskActions(taskStep, workflow, previousResults, enhancedContext)
+        except Exception as e:
+            logger.error(f"Error in generateTaskActions: {str(e)}")
+            raise
+
+    def updateWorkflowAfterTaskPlanCreated(self, totalTasks: int):
+        """Update workflow object after task plan creation"""
+        try:
+            updateData = {
+                "totalTasks": totalTasks,
+                "currentTask": 0,
+                "currentAction": 0,
+                "totalActions": 0
+            }
+
+            # Update workflow object
+            self.workflow.totalTasks = totalTasks
+            self.workflow.currentTask = 0
+            self.workflow.currentAction = 0
+            self.workflow.totalActions = 0
+
+            # Update in database
+            self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+            logger.info(f"Updated workflow {self.workflow.id} after task plan creation: {updateData}")
+
+        except Exception as e:
+            logger.error(f"Error updating workflow after task plan creation: {str(e)}")
+
+    def updateWorkflowBeforeExecutingTask(self, taskNumber: int):
+        """Update workflow object before executing a task"""
+        try:
+            updateData = {
+                "currentTask": taskNumber,
+                "currentAction": 0,
+                "totalActions": 0
+            }
+
+            # Update workflow object
+            self.workflow.currentTask = taskNumber
+            self.workflow.currentAction = 0
+            self.workflow.totalActions = 0
+
+            # Update in database
+            self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+            logger.info(f"Updated workflow {self.workflow.id} before executing task {taskNumber}: {updateData}")
+
+        except Exception as e:
+            logger.error(f"Error updating workflow before executing task: {str(e)}")
+
+    def updateWorkflowAfterActionPlanning(self, totalActions: int):
+        """Update workflow object after action planning for current task"""
+        try:
+            updateData = {
+                "totalActions": totalActions
+            }
+
+            # Update workflow object
+            self.workflow.totalActions = totalActions
+
+            # Update in database
+            self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+            logger.info(f"Updated workflow {self.workflow.id} after action planning: {updateData}")
+
+        except Exception as e:
+            logger.error(f"Error updating workflow after action planning: {str(e)}")
+
+    def updateWorkflowBeforeExecutingAction(self, actionNumber: int):
+        """Update workflow object before executing an action"""
+        try:
+            updateData = {
+                "currentAction": actionNumber
+            }
+
+            # Update workflow object
+            self.workflow.currentAction = actionNumber
+
+            # Update in database
+            self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+            logger.info(f"Updated workflow {self.workflow.id} before executing action {actionNumber}: {updateData}")
+
+        except Exception as e:
+            logger.error(f"Error updating workflow before executing action: {str(e)}")
+
+    def setWorkflowTotals(self, totalTasks: int = None, totalActions: int = None):
+        """Set total counts for workflow progress tracking and update database"""
+        try:
+            updateData = {}
+
+            if totalTasks is not None:
+                self.workflow.totalTasks = totalTasks
+                updateData["totalTasks"] = totalTasks
+
+            if totalActions is not None:
+                self.workflow.totalActions = totalActions
+                updateData["totalActions"] = totalActions
+
+            # Update workflow object in database if we have changes
+            if updateData:
+                self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+                logger.info(f"Updated workflow {self.workflow.id} totals in database: {updateData}")
+
+            logger.debug(f"Updated workflow totals: Tasks {self.workflow.totalTasks if hasattr(self.workflow, 'totalTasks') else 'N/A'}, Actions {self.workflow.totalActions if hasattr(self.workflow, 'totalActions') else 'N/A'}")
+        except Exception as e:
+            logger.error(f"Error setting workflow totals: {str(e)}")
+
+    def resetWorkflowForNewSession(self):
+        """Reset workflow object for a new session"""
+        try:
+            updateData = {
+                "currentTask": 0,
+                "currentAction": 0,
+                "totalTasks": 0,
+                "totalActions": 0
+            }
+
+            # Update workflow object
+            self.workflow.currentTask = 0
+            self.workflow.currentAction = 0
+            self.workflow.totalTasks = 0
+            self.workflow.totalActions = 0
+
+            # Update in database
+            self.services.interfaceDbChat.updateWorkflow(self.workflow.id, updateData)
+            logger.info(f"Reset workflow {self.workflow.id} for new session: {updateData}")
+
+        except Exception as e:
+            logger.error(f"Error resetting workflow for new session: {str(e)}")
+
+    def writeTraceLog(self, contextText: str, data: Any) -> None:
+        """Write trace data to configured trace file if in debug mode"""
+        try:
+            import os
+            import json
+            from datetime import datetime, UTC
+
+            # Only write if debug logging is effectively enabled.
+            # BUGFIX: comparing logger.level directly misbehaves when the level
+            # is NOTSET (0, inherited from a parent); isEnabledFor() consults
+            # the effective level instead.
+            if not logger.isEnabledFor(logging.DEBUG):
+                return
+
+            # Get log directory from configuration
+            logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
+            if not os.path.isabs(logDir):
+                # If relative path, make it relative to the gateway directory
+                gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+                logDir = os.path.join(gatewayDir, logDir)
+
+            # Ensure log directory exists
+            os.makedirs(logDir, exist_ok=True)
+
+            # Create trace file path
+            traceFile = os.path.join(logDir, "log_trace.log")
+
+            # Format the trace entry
+            timestamp = datetime.fromtimestamp(self.services.utils.getUtcTimestamp(), UTC).strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]
+            traceEntry = f"[{timestamp}] {contextText}\n"
+
+            # Add data if provided - show full content without truncation
+            if data is not None:
+                if isinstance(data, (dict, list)):
+                    # Use ensure_ascii=False to preserve Unicode characters and indent=2 for readability
+                    traceEntry += f"Data: {json.dumps(data, indent=2, default=str, ensure_ascii=False)}\n"
+                else:
+                    # For string data, show full content without truncation
+                    traceEntry += f"Data: {str(data)}\n"
+
+            traceEntry += "-" * 80 + "\n\n"
+
+            # Write to trace file
+            with open(traceFile, "a", encoding="utf-8") as f:
+                f.write(traceEntry)
+
+        except Exception as e:
+            # Don't log trace errors to avoid recursion
+            pass
+
+    def clearTraceLog(self) -> None:
+        """Clear the trace log file"""
+        try:
+            import os
+
+            # Get log directory from configuration
+            logDir = self.services.utils.configGet("APP_LOGGING_LOG_DIR", "./")
+            if not os.path.isabs(logDir):
+                # If relative path, make it relative to the gateway directory
+                gatewayDir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+                logDir = os.path.join(gatewayDir, logDir)
+
+            # Create trace file path
+            traceFile = os.path.join(logDir, "log_trace.log")
+
+            # Clear the trace file by truncating it in place
+            if os.path.exists(traceFile):
+                with open(traceFile, "w", encoding="utf-8") as f:
+                    f.write("")
+                logger.info("Trace log cleared")
+            else:
+                logger.info("Trace log file does not exist, nothing to clear")
+
+        except Exception as e:
+            logger.error(f"Error clearing trace log: {str(e)}")
+
+    async def prepareTaskHandover(self, taskStep, taskActions, taskResult, workflow):
+        """Prepare task handover data for workflow coordination.
+
+        Accepts either a ReviewResult (has met_criteria/to_dict) or a plain
+        TaskResult; normalizes both into a review_result dict for the handover.
+        """
+        try:
+            # Check workflow status before preparing task handover
+            self._checkWorkflowStopped(workflow)
+
+            # Log handover status summary
+            status = taskResult.status if taskResult else 'unknown'
+
+            # Handle both TaskResult and ReviewResult objects
+            if hasattr(taskResult, 'met_criteria'):
+                # This is a ReviewResult object
+                met = taskResult.met_criteria if taskResult.met_criteria else []
+                reviewResult = taskResult.to_dict()
+            else:
+                # This is a TaskResult object
+                met = []
+                reviewResult = {
+                    'status': taskResult.status if taskResult else 'unknown',
+                    'reason': taskResult.error if taskResult and hasattr(taskResult, 'error') else None,
+                    'success': taskResult.success if taskResult else False
+                }
+
+            handoverData = {
+                'task_id': taskStep.id,
+                'task_description': taskStep.objective,
+                'actions': [action.to_dict() for action in taskActions] if taskActions else [],
+                'review_result': reviewResult,
+                'workflow_id': workflow.id,
+                'handover_time': self.services.utils.getUtcTimestamp()
+            }
+            logger.info(f"Prepared handover for task {taskStep.id} in workflow {workflow.id}")
+            return handoverData
+        except Exception as e:
+            logger.error(f"Error in prepareTaskHandover: {str(e)}")
+            return {'error': str(e)}
diff --git a/modules/workflows/workflowManager.py b/modules/workflows/workflowManager.py
index 144e0ad7..da1ffdc0 100644
--- a/modules/workflows/workflowManager.py
+++ b/modules/workflows/workflowManager.py
@@ -12,7 +12,7 @@ from modules.datamodels.datamodelChat import (
WorkflowResult
)
from modules.datamodels.datamodelWorkflow import TaskItem, TaskStatus, TaskContext
-from modules.workflows.processing.handlingTasks import HandlingTasks, WorkflowStoppedException
+from modules.workflows.processing.workflowProcessor import WorkflowProcessor, WorkflowStoppedException
from modules.shared.timezoneUtils import get_utc_timestamp
@@ -23,7 +23,7 @@ class WorkflowManager:
def __init__(self, services):
self.services = services
- self.handlingTasks = None
+ self.workflowProcessor = None
# Exported functions
@@ -150,7 +150,7 @@ class WorkflowManager:
async def _workflowProcess(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> None:
"""Process a workflow with user input"""
try:
- self.handlingTasks = HandlingTasks(self.services, workflow)
+ self.workflowProcessor = WorkflowProcessor(self.services, workflow)
message = await self._sendFirstMessage(userInput, workflow)
task_plan = await self._planTasks(userInput, workflow)
workflow_result = await self._executeTasks(task_plan, workflow)
@@ -167,7 +167,7 @@ class WorkflowManager:
async def _sendFirstMessage(self, userInput: UserInputRequest, workflow: ChatWorkflow) -> ChatMessage:
"""Send first message to start workflow"""
try:
- self.handlingTasks._checkWorkflowStopped()
+ self.workflowProcessor._checkWorkflowStopped(workflow)
# Create initial message using interface
# Generate the correct documentsLabel that matches what getDocumentReferenceString will create
@@ -200,7 +200,7 @@ class WorkflowManager:
workflow.messages.append(message)
# Clear trace log for new workflow session
- self.handlingTasks.clearTraceLog()
+ self.workflowProcessor.clearTraceLog()
# Add documents if any, now with messageId
if userInput.listFileId:
@@ -220,7 +220,7 @@ class WorkflowManager:
async def _planTasks(self, userInput: UserInputRequest, workflow: ChatWorkflow):
"""Generate task plan for workflow execution"""
- handling = self.handlingTasks
+ handling = self.workflowProcessor
# Generate task plan first (shared for both modes)
task_plan = await handling.generateTaskPlan(userInput.prompt, workflow)
if not task_plan or not task_plan.tasks:
@@ -232,7 +232,7 @@ class WorkflowManager:
async def _executeTasks(self, task_plan, workflow: ChatWorkflow) -> WorkflowResult:
"""Execute all tasks in the task plan"""
- handling = self.handlingTasks
+ handling = self.workflowProcessor
total_tasks = len(task_plan.tasks)
all_task_results: List = []
previous_results: List[str] = []
@@ -241,7 +241,7 @@ class WorkflowManager:
current_task_index = idx + 1
logger.info(f"Task {current_task_index}/{total_tasks}: {task_step.objective}")
- # Build TaskContext (mode-specific behavior is inside HandlingTasks)
+ # Build TaskContext (mode-specific behavior is inside WorkflowProcessor)
task_context = TaskContext(
task_step=task_step,
workflow=workflow,
@@ -287,7 +287,7 @@ class WorkflowManager:
"""Process workflow results and create appropriate messages"""
try:
try:
- self.handlingTasks._checkWorkflowStopped()
+ self.workflowProcessor._checkWorkflowStopped(workflow)
except WorkflowStoppedException:
logger.info(f"Workflow {workflow.id} was stopped during result processing")
@@ -505,7 +505,7 @@ class WorkflowManager:
async def _generateWorkflowFeedback(self, workflow: ChatWorkflow) -> str:
"""Generate feedback message for workflow completion"""
try:
- self.handlingTasks._checkWorkflowStopped()
+ self.workflowProcessor._checkWorkflowStopped(workflow)
# Count messages by role
user_messages = [msg for msg in workflow.messages if msg.role == 'user']
diff --git a/test-chat/extraction/method_ai_20251003-200852/ai_result_r0t0a0.txt b/test-chat/extraction/method_ai_20251003-200852/ai_result_r0t0a0.txt
deleted file mode 100644
index 02407260..00000000
--- a/test-chat/extraction/method_ai_20251003-200852/ai_result_r0t0a0.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-Prime Numbers:
-
-2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251003-200852/raw_result.txt b/test-chat/extraction/method_ai_20251003-200852/raw_result.txt
deleted file mode 100644
index 1cb838bf..00000000
--- a/test-chat/extraction/method_ai_20251003-200852/raw_result.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-```json
-{
- "documents": [
- {
- "data": "Prime Numbers:\n\n2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997",
- "mimeType": "text/plain",
- "comment": "This document contains a list of prime numbers up to 1000."
- }
- ]
-}
-```
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251003-200904/ai_result_r0t0a0.txt b/test-chat/extraction/method_ai_20251003-200904/ai_result_r0t0a0.txt
deleted file mode 100644
index 02407260..00000000
--- a/test-chat/extraction/method_ai_20251003-200904/ai_result_r0t0a0.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-Prime Numbers:
-
-2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251003-200904/raw_result.txt b/test-chat/extraction/method_ai_20251003-200904/raw_result.txt
deleted file mode 100644
index c34fc2bd..00000000
--- a/test-chat/extraction/method_ai_20251003-200904/raw_result.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-```json
-{
- "documents": [
- {
- "data": "Prime Numbers:\n\n2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997",
- "mimeType": "text/plain",
- "comment": "A list of prime numbers up to 1000."
- }
- ]
-}
-```
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-000320/ai_result_r0t0a0.txt b/test-chat/extraction/method_ai_20251004-000320/ai_result_r0t0a0.txt
new file mode 100644
index 00000000..902246ce
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-000320/ai_result_r0t0a0.txt
@@ -0,0 +1,49 @@
+Schritt-fΓΌr-Schritt-Anleitung zur Berechnung der ersten 1000 Primzahlen mit dem Sieb des Eratosthenes:
+
+1. **Initialisierung:**
+ - Bestimme eine obere Grenze fΓΌr die Berechnung der ersten 1000 Primzahlen. Eine gute Faustregel ist, die Grenze auf etwa 10.000 zu setzen, da dies sicherstellt, dass genΓΌgend Primzahlen gefunden werden.
+ - Erstelle eine Liste `isPrime` mit `True`-Werten, die die Indizes von 0 bis zur gewΓ€hlten Grenze abdeckt. Diese Liste wird verwendet, um zu markieren, ob eine Zahl eine Primzahl ist.
+
+2. **SpezialfΓ€lle behandeln:**
+ - Setze `isPrime[0]` und `isPrime[1]` auf `False`, da 0 und 1 keine Primzahlen sind.
+
+3. **Sieb des Eratosthenes anwenden:**
+ - Beginne mit der ersten Primzahl, `p = 2`.
+ - FΓΌhre eine Schleife aus, die bei `p = 2` beginnt und bis zur Quadratwurzel der oberen Grenze reicht.
+ - Wenn `isPrime[p]` `True` ist, dann:
+ - Markiere alle Vielfachen von `p` (beginnend bei `p*p`) als `False`, da sie keine Primzahlen sind.
+ - ErhΓΆhe `p` um 1 und wiederhole den Vorgang.
+
+4. **Primzahlen sammeln:**
+ - Erstelle eine leere Liste `primes`.
+ - Durchlaufe die `isPrime`-Liste und fΓΌge alle Indizes, die `True` sind, zur `primes`-Liste hinzu.
+
+5. **Erste 1000 Primzahlen extrahieren:**
+ - Schneide die `primes`-Liste auf die ersten 1000 Elemente zu.
+
+6. **Ergebnis ausgeben:**
+ - Gib die Liste der ersten 1000 Primzahlen aus.
+
+Beispiel in Python:
+
+python
+import math
+
+def sieve_of_eratosthenes(limit):
+ isPrime = [True] * (limit + 1)
+ isPrime[0] = isPrime[1] = False
+ for p in range(2, int(math.sqrt(limit)) + 1):
+ if isPrime[p]:
+ for multiple in range(p * p, limit + 1, p):
+ isPrime[multiple] = False
+ primes = [num for num, prime in enumerate(isPrime) if prime]
+ return primes
+
+# Setze die Grenze auf 10.000
+limit = 10000
+primes = sieve_of_eratosthenes(limit)
+first_1000_primes = primes[:1000]
+print(first_1000_primes)
+
+
+Kommentar: Diese Anleitung beschreibt die Implementierung des Sieb des Eratosthenes zur Berechnung der ersten 1000 Primzahlen und enthΓ€lt ein Beispiel in Python.
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-000320/raw_result.txt b/test-chat/extraction/method_ai_20251004-000320/raw_result.txt
new file mode 100644
index 00000000..6e1c391a
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-000320/raw_result.txt
@@ -0,0 +1,12 @@
+```json
+{
+ "documents": [
+ {
+ "data": "Schritt-fΓΌr-Schritt-Anleitung zur Berechnung der ersten 1000 Primzahlen mit dem Sieb des Eratosthenes:\n\n1. **Initialisierung:**\n - Bestimme eine obere Grenze fΓΌr die Berechnung der ersten 1000 Primzahlen. Eine gute Faustregel ist, die Grenze auf etwa 10.000 zu setzen, da dies sicherstellt, dass genΓΌgend Primzahlen gefunden werden.\n - Erstelle eine Liste `isPrime` mit `True`-Werten, die die Indizes von 0 bis zur gewΓ€hlten Grenze abdeckt. Diese Liste wird verwendet, um zu markieren, ob eine Zahl eine Primzahl ist.\n\n2. **SpezialfΓ€lle behandeln:**\n - Setze `isPrime[0]` und `isPrime[1]` auf `False`, da 0 und 1 keine Primzahlen sind.\n\n3. **Sieb des Eratosthenes anwenden:**\n - Beginne mit der ersten Primzahl, `p = 2`.\n - FΓΌhre eine Schleife aus, die bei `p = 2` beginnt und bis zur Quadratwurzel der oberen Grenze reicht.\n - Wenn `isPrime[p]` `True` ist, dann:\n - Markiere alle Vielfachen von `p` (beginnend bei `p*p`) als `False`, da sie keine Primzahlen sind.\n - ErhΓΆhe `p` um 1 und wiederhole den Vorgang.\n\n4. **Primzahlen sammeln:**\n - Erstelle eine leere Liste `primes`.\n - Durchlaufe die `isPrime`-Liste und fΓΌge alle Indizes, die `True` sind, zur `primes`-Liste hinzu.\n\n5. **Erste 1000 Primzahlen extrahieren:**\n - Schneide die `primes`-Liste auf die ersten 1000 Elemente zu.\n\n6. 
**Ergebnis ausgeben:**\n - Gib die Liste der ersten 1000 Primzahlen aus.\n\nBeispiel in Python:\n\n```python\nimport math\n\ndef sieve_of_eratosthenes(limit):\n isPrime = [True] * (limit + 1)\n isPrime[0] = isPrime[1] = False\n for p in range(2, int(math.sqrt(limit)) + 1):\n if isPrime[p]:\n for multiple in range(p * p, limit + 1, p):\n isPrime[multiple] = False\n primes = [num for num, prime in enumerate(isPrime) if prime]\n return primes\n\n# Setze die Grenze auf 10.000\nlimit = 10000\nprimes = sieve_of_eratosthenes(limit)\nfirst_1000_primes = primes[:1000]\nprint(first_1000_primes)\n```\n\nKommentar: Diese Anleitung beschreibt die Implementierung des Sieb des Eratosthenes zur Berechnung der ersten 1000 Primzahlen und enthΓ€lt ein Beispiel in Python.",
+ "mimeType": "text/plain",
+ "comment": "Anleitung zur Berechnung der ersten 1000 Primzahlen mit dem Sieb des Eratosthenes."
+ }
+ ],
+ "continue": false
+}
+```
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251003-200841/summary.txt b/test-chat/extraction/method_ai_20251004-000320/summary.txt
similarity index 100%
rename from test-chat/extraction/method_ai_20251003-200841/summary.txt
rename to test-chat/extraction/method_ai_20251004-000320/summary.txt
diff --git a/test-chat/extraction/method_ai_20251004-000335/ai_result_r0t0a0.txt b/test-chat/extraction/method_ai_20251004-000335/ai_result_r0t0a0.txt
new file mode 100644
index 00000000..56871e2d
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-000335/ai_result_r0t0a0.txt
@@ -0,0 +1,10 @@
+1. 2
+2. 3
+3. 5
+4. 7
+5. 11
+6. 13
+7. 17
+8. 19
+9. 23
+10. 29
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-000335/raw_result.txt b/test-chat/extraction/method_ai_20251004-000335/raw_result.txt
new file mode 100644
index 00000000..c09fff2b
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-000335/raw_result.txt
@@ -0,0 +1,53 @@
+To generate a list of prime numbers up to a specified number \( N \) using the Sieve of Eratosthenes algorithm, we will first implement the algorithm and then format the output as a numbered list. Let's assume \( N = 30 \) for this example.
+
+```python
+def sieve_of_eratosthenes(n):
+ primes = []
+ is_prime = [True] * (n + 1)
+ p = 2
+ while (p * p <= n):
+ if (is_prime[p] == True):
+ for i in range(p * p, n + 1, p):
+ is_prime[i] = False
+ p += 1
+ for p in range(2, n + 1):
+ if is_prime[p]:
+ primes.append(p)
+ return primes
+
+# Example usage
+N = 30
+prime_numbers = sieve_of_eratosthenes(N)
+```
+
+Now, let's format these prime numbers as a numbered list:
+
+```plaintext
+1. 2
+2. 3
+3. 5
+4. 7
+5. 11
+6. 13
+7. 17
+8. 19
+9. 23
+10. 29
+```
+
+Here is the JSON response with the formatted list:
+
+```json
+{
+ "documents": [
+ {
+ "data": "1. 2\n2. 3\n3. 5\n4. 7\n5. 11\n6. 13\n7. 17\n8. 19\n9. 23\n10. 29",
+ "mimeType": "text/plain",
+ "comment": "Prime numbers up to 30 using the Sieve of Eratosthenes."
+ }
+ ],
+ "continue": false
+}
+```
+
+This JSON structure contains the prime numbers up to 30, formatted as a numbered list in plain text. If you need primes up to a different number \( N \), you can adjust the value of \( N \) in the code and regenerate the list.
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251003-200852/summary.txt b/test-chat/extraction/method_ai_20251004-000335/summary.txt
similarity index 100%
rename from test-chat/extraction/method_ai_20251003-200852/summary.txt
rename to test-chat/extraction/method_ai_20251004-000335/summary.txt
diff --git a/test-chat/extraction/method_ai_20251004-001456/ai_result_r0t0a0.txt b/test-chat/extraction/method_ai_20251004-001456/ai_result_r0t0a0.txt
new file mode 100644
index 00000000..efb0c11e
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-001456/ai_result_r0t0a0.txt
@@ -0,0 +1,32 @@
+def sieve_of_eratosthenes(limit):
+ """
+ Generate all prime numbers up to the given limit using the Sieve of Eratosthenes algorithm.
+ """
+ is_prime = [True] * (limit + 1)
+ p = 2
+ while (p * p <= limit):
+ if (is_prime[p] == True):
+ for i in range(p * p, limit + 1, p):
+ is_prime[i] = False
+ p += 1
+ prime_numbers = [p for p in range(2, limit + 1) if is_prime[p]]
+ return prime_numbers
+
+
+def first_n_primes(n):
+ """
+ Calculate the first n prime numbers using the Sieve of Eratosthenes algorithm.
+ """
+ # Estimate an upper limit for the nth prime number using the approximation n * log(n * log(n))
+ # This is a rough estimate and ensures we have a high enough limit to find the first n primes.
+ import math
+ if n < 6:
+ limit = 15
+ else:
+ limit = int(n * math.log(n * math.log(n)))
+ primes = sieve_of_eratosthenes(limit)
+ return primes[:n]
+
+# Example usage:
+first_1000_primes = first_n_primes(1000)
+print(first_1000_primes)
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-001456/raw_result.txt b/test-chat/extraction/method_ai_20251004-001456/raw_result.txt
new file mode 100644
index 00000000..883b867a
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-001456/raw_result.txt
@@ -0,0 +1,12 @@
+```json
+{
+ "documents": [
+ {
+ "data": "def sieve_of_eratosthenes(limit):\n \"\"\"\n Generate all prime numbers up to the given limit using the Sieve of Eratosthenes algorithm.\n \"\"\"\n is_prime = [True] * (limit + 1)\n p = 2\n while (p * p <= limit):\n if (is_prime[p] == True):\n for i in range(p * p, limit + 1, p):\n is_prime[i] = False\n p += 1\n prime_numbers = [p for p in range(2, limit + 1) if is_prime[p]]\n return prime_numbers\n\n\ndef first_n_primes(n):\n \"\"\"\n Calculate the first n prime numbers using the Sieve of Eratosthenes algorithm.\n \"\"\"\n # Estimate an upper limit for the nth prime number using the approximation n * log(n * log(n))\n # This is a rough estimate and ensures we have a high enough limit to find the first n primes.\n import math\n if n < 6:\n limit = 15\n else:\n limit = int(n * math.log(n * math.log(n)))\n primes = sieve_of_eratosthenes(limit)\n return primes[:n]\n\n# Example usage:\nfirst_1000_primes = first_n_primes(1000)\nprint(first_1000_primes)\n",
+ "mimeType": "text/plain",
+ "comment": "This function calculates the first 1000 prime numbers using the Sieve of Eratosthenes algorithm."
+ }
+ ],
+ "continue": false
+}
+```
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251003-200904/summary.txt b/test-chat/extraction/method_ai_20251004-001456/summary.txt
similarity index 100%
rename from test-chat/extraction/method_ai_20251003-200904/summary.txt
rename to test-chat/extraction/method_ai_20251004-001456/summary.txt
diff --git a/test-chat/extraction/method_ai_20251004-001507/ai_result_r0t0a0.txt b/test-chat/extraction/method_ai_20251004-001507/ai_result_r0t0a0.txt
new file mode 100644
index 00000000..f8c80224
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-001507/ai_result_r0t0a0.txt
@@ -0,0 +1 @@
+2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-001507/raw_result.txt b/test-chat/extraction/method_ai_20251004-001507/raw_result.txt
new file mode 100644
index 00000000..682c9f2c
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-001507/raw_result.txt
@@ -0,0 +1,12 @@
+```json
+{
+ "documents": [
+ {
+ "data": "2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97",
+ "mimeType": "text/plain",
+ "comment": "List of prime numbers between 1 and 100."
+ }
+ ],
+ "continue": false
+}
+```
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-001507/summary.txt b/test-chat/extraction/method_ai_20251004-001507/summary.txt
new file mode 100644
index 00000000..b789d667
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-001507/summary.txt
@@ -0,0 +1,2 @@
+documents: 1
+doc[1]: name=ai_result_r0t0a0.txt, mimeType=text/plain
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-001800/ai_result_r0t0a0.txt b/test-chat/extraction/method_ai_20251004-001800/ai_result_r0t0a0.txt
new file mode 100644
index 00000000..a29a0071
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-001800/ai_result_r0t0a0.txt
@@ -0,0 +1,27 @@
+def sieve_of_eratosthenes(n):
+ """
+ Generate a list of prime numbers up to n using the Sieve of Eratosthenes algorithm.
+ """
+ is_prime = [True] * (n + 1)
+ p = 2
+ while (p * p <= n):
+ if (is_prime[p] == True):
+ for i in range(p * p, n + 1, p):
+ is_prime[i] = False
+ p += 1
+ prime_numbers = [p for p in range(2, n) if is_prime[p]]
+ return prime_numbers
+
+# We need to find the upper limit for the sieve to ensure we get at least 1000 primes.
+# A rough estimate for the nth prime is n * log(n * log(n)), but we'll use a safe upper bound.
+
+def find_first_1000_primes():
+ upper_limit = 10000 # A safe upper limit to ensure we get at least 1000 primes
+ primes = sieve_of_eratosthenes(upper_limit)
+ return primes[:1000]
+
+# Get the first 1000 prime numbers
+first_1000_primes = find_first_1000_primes()
+
+# Print the first 1000 prime numbers
+print(first_1000_primes)
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-001800/raw_result.txt b/test-chat/extraction/method_ai_20251004-001800/raw_result.txt
new file mode 100644
index 00000000..7753eb9d
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-001800/raw_result.txt
@@ -0,0 +1,12 @@
+```json
+{
+ "documents": [
+ {
+ "data": "def sieve_of_eratosthenes(n):\n \"\"\"\n Generate a list of prime numbers up to n using the Sieve of Eratosthenes algorithm.\n \"\"\"\n is_prime = [True] * (n + 1)\n p = 2\n while (p * p <= n):\n if (is_prime[p] == True):\n for i in range(p * p, n + 1, p):\n is_prime[i] = False\n p += 1\n prime_numbers = [p for p in range(2, n) if is_prime[p]]\n return prime_numbers\n\n# We need to find the upper limit for the sieve to ensure we get at least 1000 primes.\n# A rough estimate for the nth prime is n * log(n * log(n)), but we'll use a safe upper bound.\n\ndef find_first_1000_primes():\n upper_limit = 10000 # A safe upper limit to ensure we get at least 1000 primes\n primes = sieve_of_eratosthenes(upper_limit)\n return primes[:1000]\n\n# Get the first 1000 prime numbers\nfirst_1000_primes = find_first_1000_primes()\n\n# Print the first 1000 prime numbers\nprint(first_1000_primes)\n",
+ "mimeType": "text/plain",
+ "comment": "Python code to calculate and validate the first 1000 prime numbers using the Sieve of Eratosthenes."
+ }
+ ],
+ "continue": false
+}
+```
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-001800/summary.txt b/test-chat/extraction/method_ai_20251004-001800/summary.txt
new file mode 100644
index 00000000..b789d667
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-001800/summary.txt
@@ -0,0 +1,2 @@
+documents: 1
+doc[1]: name=ai_result_r0t0a0.txt, mimeType=text/plain
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-001810/ai_result_r0t0a0.txt b/test-chat/extraction/method_ai_20251004-001810/ai_result_r0t0a0.txt
new file mode 100644
index 00000000..ef974af1
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-001810/ai_result_r0t0a0.txt
@@ -0,0 +1,24 @@
+Liste der ersten 20 Primzahlen:
+
+1. 2
+2. 3
+3. 5
+4. 7
+5. 11
+6. 13
+7. 17
+8. 19
+9. 23
+10. 29
+11. 31
+12. 37
+13. 41
+14. 43
+15. 47
+16. 53
+17. 59
+18. 61
+19. 67
+20. 71
+
+Hinweis: Jede Zahl ist eine Primzahl und steht in einer eigenen Zeile, um die Lesbarkeit zu verbessern.
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-001810/raw_result.txt b/test-chat/extraction/method_ai_20251004-001810/raw_result.txt
new file mode 100644
index 00000000..9d417145
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-001810/raw_result.txt
@@ -0,0 +1,12 @@
+```json
+{
+ "documents": [
+ {
+ "data": "Liste der ersten 20 Primzahlen:\n\n1. 2\n2. 3\n3. 5\n4. 7\n5. 11\n6. 13\n7. 17\n8. 19\n9. 23\n10. 29\n11. 31\n12. 37\n13. 41\n14. 43\n15. 47\n16. 53\n17. 59\n18. 61\n19. 67\n20. 71\n\nHinweis: Jede Zahl ist eine Primzahl und steht in einer eigenen Zeile, um die Lesbarkeit zu verbessern.",
+ "mimeType": "text/plain",
+ "comment": "Eine einfache Liste der ersten 20 Primzahlen, formatiert fΓΌr Klarheit und Lesbarkeit."
+ }
+ ],
+ "continue": false
+}
+```
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-001810/summary.txt b/test-chat/extraction/method_ai_20251004-001810/summary.txt
new file mode 100644
index 00000000..b789d667
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-001810/summary.txt
@@ -0,0 +1,2 @@
+documents: 1
+doc[1]: name=ai_result_r0t0a0.txt, mimeType=text/plain
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251003-200841/ai_result_r0t0a0.txt b/test-chat/extraction/method_ai_20251004-004539/ai_result_r0t0a0.txt
similarity index 73%
rename from test-chat/extraction/method_ai_20251003-200841/ai_result_r0t0a0.txt
rename to test-chat/extraction/method_ai_20251004-004539/ai_result_r0t0a0.txt
index dfa26220..adfab51b 100644
--- a/test-chat/extraction/method_ai_20251003-200841/ai_result_r0t0a0.txt
+++ b/test-chat/extraction/method_ai_20251004-004539/ai_result_r0t0a0.txt
@@ -1,32 +1,7 @@
-To calculate the first 1000 prime numbers efficiently, we can use the Sieve of Eratosthenes algorithm. This algorithm is efficient for finding all prime numbers up to a specified integer. Here's how you can implement it and extract the first 1000 prime numbers:
-
-```python
-def sieve_of_eratosthenes(limit):
- is_prime = [True] * (limit + 1)
- p = 2
- while (p * p <= limit):
- if (is_prime[p] == True):
- for i in range(p * p, limit + 1, p):
- is_prime[i] = False
- p += 1
- prime_numbers = [p for p in range(2, limit) if is_prime[p]]
- return prime_numbers
-
-def first_n_primes(n):
- limit = 12500 # Initial guess for the upper limit
- primes = sieve_of_eratosthenes(limit)
- while len(primes) < n:
- limit *= 2
- primes = sieve_of_eratosthenes(limit)
- return primes[:n]
-
-first_1000_primes = first_n_primes(1000)
-```
-
-Now, let's format the output in the specified JSON format:
+To calculate and validate the first 1000 prime numbers, we can use a simple algorithm to generate them. Here's the JSON response with the first 1000 prime numbers:
```json
{
"documents": [
{
- "data": "2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 
2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 336
\ No newline at end of file
+ "data": "2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 
2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 382
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251003-200841/raw_result.txt b/test-chat/extraction/method_ai_20251004-004539/raw_result.txt
similarity index 73%
rename from test-chat/extraction/method_ai_20251003-200841/raw_result.txt
rename to test-chat/extraction/method_ai_20251004-004539/raw_result.txt
index dfa26220..adfab51b 100644
--- a/test-chat/extraction/method_ai_20251003-200841/raw_result.txt
+++ b/test-chat/extraction/method_ai_20251004-004539/raw_result.txt
@@ -1,32 +1,7 @@
-To calculate the first 1000 prime numbers efficiently, we can use the Sieve of Eratosthenes algorithm. This algorithm is efficient for finding all prime numbers up to a specified integer. Here's how you can implement it and extract the first 1000 prime numbers:
-
-```python
-def sieve_of_eratosthenes(limit):
- is_prime = [True] * (limit + 1)
- p = 2
- while (p * p <= limit):
- if (is_prime[p] == True):
- for i in range(p * p, limit + 1, p):
- is_prime[i] = False
- p += 1
- prime_numbers = [p for p in range(2, limit) if is_prime[p]]
- return prime_numbers
-
-def first_n_primes(n):
- limit = 12500 # Initial guess for the upper limit
- primes = sieve_of_eratosthenes(limit)
- while len(primes) < n:
- limit *= 2
- primes = sieve_of_eratosthenes(limit)
- return primes[:n]
-
-first_1000_primes = first_n_primes(1000)
-```
-
-Now, let's format the output in the specified JSON format:
+To calculate and validate the first 1000 prime numbers, we can use a simple algorithm to generate them. Here's the JSON response with the first 1000 prime numbers:
```json
{
"documents": [
{
- "data": "2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 
2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 336
\ No newline at end of file
+ "data": "2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 
2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 382
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-004539/summary.txt b/test-chat/extraction/method_ai_20251004-004539/summary.txt
new file mode 100644
index 00000000..b789d667
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-004539/summary.txt
@@ -0,0 +1,2 @@
+documents: 1
+doc[1]: name=ai_result_r0t0a0.txt, mimeType=text/plain
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-004746/ai_result_r0t0a0.txt b/test-chat/extraction/method_ai_20251004-004746/ai_result_r0t0a0.txt
new file mode 100644
index 00000000..7f56ea69
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-004746/ai_result_r0t0a0.txt
@@ -0,0 +1,7 @@
+To calculate and validate the first 1000 prime numbers, I will generate them and provide the list in the requested JSON format. Here is the response:
+
+```json
+{
+ "documents": [
+ {
+ "data": "2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 
2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821, 382
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-004746/raw_result.txt b/test-chat/extraction/method_ai_20251004-004746/raw_result.txt
new file mode 100644
index 00000000..7f56ea69
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-004746/raw_result.txt
@@ -0,0 +1,7 @@
+To calculate and validate the first 1000 prime numbers, I will generate them and provide the list in the requested JSON format. Here is the response:
+
+```json
+{
+ "documents": [
+ {
+ "data": "2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 
2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821, 382
\ No newline at end of file
diff --git a/test-chat/extraction/method_ai_20251004-004746/summary.txt b/test-chat/extraction/method_ai_20251004-004746/summary.txt
new file mode 100644
index 00000000..b789d667
--- /dev/null
+++ b/test-chat/extraction/method_ai_20251004-004746/summary.txt
@@ -0,0 +1,2 @@
+documents: 1
+doc[1]: name=ai_result_r0t0a0.txt, mimeType=text/plain
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220757_1_1_0/message_text.txt b/test-chat/obj/m20251003-220757_1_1_0/message_text.txt
deleted file mode 100644
index 34e3fea1..00000000
--- a/test-chat/obj/m20251003-220757_1_1_0/message_text.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-π **Task 1/2**
-
-π¬ Berechne die ersten 1000 Primzahlen mit einem effizienten Algorithmus
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220841_1_1_1/message.json b/test-chat/obj/m20251003-220841_1_1_1/message.json
deleted file mode 100644
index b3cbb479..00000000
--- a/test-chat/obj/m20251003-220841_1_1_1/message.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "id": "msg_b476c20d-37e0-4758-b9bc-dfac28d7aca0",
- "workflowId": "4886a461-687b-4980-853d-91251f9424ac",
- "parentMessageId": null,
- "message": "**Action 1/1 (ai.process)**\n\nβ
Calculate first 1000 prime numbers using efficient algorithm\n\n",
- "role": "assistant",
- "status": "step",
- "sequenceNr": 4,
- "publishedAt": 1759522121.3918097,
- "roundNumber": 1,
- "taskNumber": 1,
- "actionNumber": 1,
- "documentsLabel": "round1_task1_action1_results",
- "actionId": "action_aaacb272-b7b6-494b-9fea-db17d710bc8a",
- "actionMethod": "ai",
- "actionName": "process",
- "success": null,
- "documents": []
-}
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220841_1_1_1/message_text.txt b/test-chat/obj/m20251003-220841_1_1_1/message_text.txt
deleted file mode 100644
index ce949fc5..00000000
--- a/test-chat/obj/m20251003-220841_1_1_1/message_text.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-**Action 1/1 (ai.process)**
-
-β
Calculate first 1000 prime numbers using efficient algorithm
-
diff --git a/test-chat/obj/m20251003-220841_1_1_1/round1_task1_action1_results/document_001_metadata.json b/test-chat/obj/m20251003-220841_1_1_1/round1_task1_action1_results/document_001_metadata.json
deleted file mode 100644
index f6c985f0..00000000
--- a/test-chat/obj/m20251003-220841_1_1_1/round1_task1_action1_results/document_001_metadata.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "id": "d19c5aba-156b-4aab-b8d2-e5701d5f01b8",
- "messageId": "msg_b476c20d-37e0-4758-b9bc-dfac28d7aca0",
- "fileId": "e36734d2-da8d-4423-b7a3-b44fa2c93f30",
- "fileName": "ai_result_r0t0a0_39.txt",
- "fileSize": 3638,
- "mimeType": "text/plain",
- "roundNumber": 1,
- "taskNumber": 1,
- "actionNumber": 1,
- "actionId": "action_aaacb272-b7b6-494b-9fea-db17d710bc8a"
-}
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220843_1_1_1/message.json b/test-chat/obj/m20251003-220843_1_1_1/message.json
deleted file mode 100644
index ae660dd3..00000000
--- a/test-chat/obj/m20251003-220843_1_1_1/message.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "id": "msg_d50a37c8-cea6-40b9-b317-6b4eb339c619",
- "workflowId": "4886a461-687b-4980-853d-91251f9424ac",
- "parentMessageId": null,
- "message": "π Step 1/5: ai.process β β
",
- "role": "assistant",
- "status": "step",
- "sequenceNr": 5,
- "publishedAt": 1759522122.9830983,
- "roundNumber": 1,
- "taskNumber": 1,
- "actionNumber": 1,
- "documentsLabel": "round1_task1_action1_results",
- "actionId": null,
- "actionMethod": null,
- "actionName": null,
- "success": null,
- "documents": []
-}
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220843_1_1_1/message_text.txt b/test-chat/obj/m20251003-220843_1_1_1/message_text.txt
deleted file mode 100644
index 2e4959a7..00000000
--- a/test-chat/obj/m20251003-220843_1_1_1/message_text.txt
+++ /dev/null
@@ -1 +0,0 @@
-π Step 1/5: ai.process β β
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220843_1_2_0/message_text.txt b/test-chat/obj/m20251003-220843_1_2_0/message_text.txt
deleted file mode 100644
index 6f4a3b93..00000000
--- a/test-chat/obj/m20251003-220843_1_2_0/message_text.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-π **Task 2/2**
-
-π¬ Erstelle ein Word-Dokument und formatiere die Primzahlen ΓΌbersichtlich
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220853_1_2_1/round1_task2_action1_results/document_001_metadata.json b/test-chat/obj/m20251003-220853_1_2_1/round1_task2_action1_results/document_001_metadata.json
deleted file mode 100644
index 7b4ca15d..00000000
--- a/test-chat/obj/m20251003-220853_1_2_1/round1_task2_action1_results/document_001_metadata.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "id": "bf4e69a8-fcd6-42a4-862f-de2dbc907cb4",
- "messageId": "msg_08fa0763-a33c-4ad2-81f8-e943354dc4e5",
- "fileId": "a3901b8a-8a59-4162-94f7-3151f039b014",
- "fileName": "ai_result_r0t0a0_40.txt",
- "fileSize": 825,
- "mimeType": "text/plain",
- "roundNumber": 1,
- "taskNumber": 2,
- "actionNumber": 1,
- "actionId": "action_ea265db5-c27e-43bc-8667-182369622318"
-}
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220854_1_2_1/message.json b/test-chat/obj/m20251003-220854_1_2_1/message.json
deleted file mode 100644
index 67dca005..00000000
--- a/test-chat/obj/m20251003-220854_1_2_1/message.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "id": "msg_225c63a8-49b7-4c66-93cf-3944e2219b5b",
- "workflowId": "4886a461-687b-4980-853d-91251f9424ac",
- "parentMessageId": null,
- "message": "π Step 1/5: ai.process β β
",
- "role": "assistant",
- "status": "step",
- "sequenceNr": 8,
- "publishedAt": 1759522134.8963306,
- "roundNumber": 1,
- "taskNumber": 2,
- "actionNumber": 1,
- "documentsLabel": "round1_task2_action1_results",
- "actionId": null,
- "actionMethod": null,
- "actionName": null,
- "success": null,
- "documents": []
-}
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220854_1_2_1/message_text.txt b/test-chat/obj/m20251003-220854_1_2_1/message_text.txt
deleted file mode 100644
index 2e4959a7..00000000
--- a/test-chat/obj/m20251003-220854_1_2_1/message_text.txt
+++ /dev/null
@@ -1 +0,0 @@
-π Step 1/5: ai.process β β
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220904_1_2_2/message.json b/test-chat/obj/m20251003-220904_1_2_2/message.json
deleted file mode 100644
index 817b6f4e..00000000
--- a/test-chat/obj/m20251003-220904_1_2_2/message.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "id": "msg_cf96022a-b7d8-48c9-9a97-208d1cd68f5f",
- "workflowId": "4886a461-687b-4980-853d-91251f9424ac",
- "parentMessageId": null,
- "message": "**Action 2/1 (ai.process)**\n\nβ
Create and format Word document with prime numbers\n\n",
- "role": "assistant",
- "status": "step",
- "sequenceNr": 9,
- "publishedAt": 1759522144.608428,
- "roundNumber": 1,
- "taskNumber": 2,
- "actionNumber": 2,
- "documentsLabel": "round1_task2_action2_results",
- "actionId": "action_dc3e1666-c85f-46c1-a1a5-babbb4ac6688",
- "actionMethod": "ai",
- "actionName": "process",
- "success": null,
- "documents": []
-}
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220904_1_2_2/round1_task2_action2_results/document_001_metadata.json b/test-chat/obj/m20251003-220904_1_2_2/round1_task2_action2_results/document_001_metadata.json
deleted file mode 100644
index 6e709673..00000000
--- a/test-chat/obj/m20251003-220904_1_2_2/round1_task2_action2_results/document_001_metadata.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "id": "e2816428-e83e-4116-985a-6fe52622605d",
- "messageId": "msg_cf96022a-b7d8-48c9-9a97-208d1cd68f5f",
- "fileId": "9ec61115-e379-4cde-992e-b064eccb16a5",
- "fileName": "ai_result_r0t0a0_41.txt",
- "fileSize": 825,
- "mimeType": "text/plain",
- "roundNumber": 1,
- "taskNumber": 2,
- "actionNumber": 2,
- "actionId": "action_dc3e1666-c85f-46c1-a1a5-babbb4ac6688"
-}
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220906_1_2_2/message.json b/test-chat/obj/m20251003-220906_1_2_2/message.json
deleted file mode 100644
index 0f1b6c36..00000000
--- a/test-chat/obj/m20251003-220906_1_2_2/message.json
+++ /dev/null
@@ -1,19 +0,0 @@
-{
- "id": "msg_b9f347a2-a282-4d06-a1d5-cc06e45e3a63",
- "workflowId": "4886a461-687b-4980-853d-91251f9424ac",
- "parentMessageId": null,
- "message": "π Step 2/5: ai.process β β
",
- "role": "assistant",
- "status": "step",
- "sequenceNr": 10,
- "publishedAt": 1759522146.7869656,
- "roundNumber": 1,
- "taskNumber": 2,
- "actionNumber": 2,
- "documentsLabel": "round1_task2_action2_results",
- "actionId": null,
- "actionMethod": null,
- "actionName": null,
- "success": null,
- "documents": []
-}
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220906_1_2_2/message_text.txt b/test-chat/obj/m20251003-220906_1_2_2/message_text.txt
deleted file mode 100644
index 219a06b7..00000000
--- a/test-chat/obj/m20251003-220906_1_2_2/message_text.txt
+++ /dev/null
@@ -1 +0,0 @@
-π Step 2/5: ai.process β β
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220907_1_0_0/message_text.txt b/test-chat/obj/m20251003-220907_1_0_0/message_text.txt
deleted file mode 100644
index 3a2ca270..00000000
--- a/test-chat/obj/m20251003-220907_1_0_0/message_text.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-Workflow completed.
-
-Processed 1 user inputs and generated 6 responses.
-Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220751_1_0_0/message.json b/test-chat/obj/m20251004-015321_1_0_0/message.json
similarity index 72%
rename from test-chat/obj/m20251003-220751_1_0_0/message.json
rename to test-chat/obj/m20251004-015321_1_0_0/message.json
index c31f7fbd..1eaf8cd1 100644
--- a/test-chat/obj/m20251003-220751_1_0_0/message.json
+++ b/test-chat/obj/m20251004-015321_1_0_0/message.json
@@ -1,12 +1,12 @@
{
- "id": "msg_b5c1b3f5-6ba3-4927-ade9-902afb683490",
- "workflowId": "4886a461-687b-4980-853d-91251f9424ac",
+ "id": "msg_d55b3fe5-c4f5-4a9c-b828-f68bde07db0c",
+ "workflowId": "5486d66b-c563-4b8b-a48d-f31a6df2dd7e",
"parentMessageId": null,
"message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
"role": "user",
"status": "first",
"sequenceNr": 1,
- "publishedAt": 1759522071.7880292,
+ "publishedAt": 1759535601.52835,
"roundNumber": 1,
"taskNumber": 0,
"actionNumber": 0,
diff --git a/test-chat/obj/m20251003-220751_1_0_0/message_text.txt b/test-chat/obj/m20251004-015321_1_0_0/message_text.txt
similarity index 100%
rename from test-chat/obj/m20251003-220751_1_0_0/message_text.txt
rename to test-chat/obj/m20251004-015321_1_0_0/message_text.txt
diff --git a/test-chat/obj/m20251003-220757_1_1_0/message.json b/test-chat/obj/m20251004-015326_1_1_0/message.json
similarity index 64%
rename from test-chat/obj/m20251003-220757_1_1_0/message.json
rename to test-chat/obj/m20251004-015326_1_1_0/message.json
index a85e34c1..1cfcd50b 100644
--- a/test-chat/obj/m20251003-220757_1_1_0/message.json
+++ b/test-chat/obj/m20251004-015326_1_1_0/message.json
@@ -1,12 +1,12 @@
{
- "id": "msg_4079e23b-a1bc-4f25-9304-66e7a00d3143",
- "workflowId": "4886a461-687b-4980-853d-91251f9424ac",
+ "id": "msg_de2dcf15-4c87-4b7c-b61a-0ddcfe8e55ac",
+ "workflowId": "5486d66b-c563-4b8b-a48d-f31a6df2dd7e",
"parentMessageId": null,
- "message": "π **Task 1/2**\n\nπ¬ Berechne die ersten 1000 Primzahlen mit einem effizienten Algorithmus",
+ "message": "π **Task 1/2**\n\nπ¬ Berechne die ersten 1000 Primzahlen in korrekter Reihenfolge",
"role": "assistant",
"status": "step",
"sequenceNr": 3,
- "publishedAt": 1759522077.4175804,
+ "publishedAt": 1759535606.9074337,
"roundNumber": 1,
"taskNumber": 1,
"actionNumber": 0,
diff --git a/test-chat/obj/m20251004-015326_1_1_0/message_text.txt b/test-chat/obj/m20251004-015326_1_1_0/message_text.txt
new file mode 100644
index 00000000..6039ffed
--- /dev/null
+++ b/test-chat/obj/m20251004-015326_1_1_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 1/2**
+
+π¬ Berechne die ersten 1000 Primzahlen in korrekter Reihenfolge
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-015331_1_1_1/message.json b/test-chat/obj/m20251004-015331_1_1_1/message.json
new file mode 100644
index 00000000..b76c4cf8
--- /dev/null
+++ b/test-chat/obj/m20251004-015331_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_47d719fb-d428-4eab-bd77-3781c281b016",
+ "workflowId": "5486d66b-c563-4b8b-a48d-f31a6df2dd7e",
+ "parentMessageId": null,
+ "message": "β‘ **Action 1/1** (Method ai.process.generate_prime_calculation_plan)\n\nπ¬ Ich erstelle einen detaillierten Aktionsplan zur Berechnung und Validierung der ersten 1000 Primzahlen.",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 4,
+ "publishedAt": 1759535611.6550252,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "action_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-015331_1_1_1/message_text.txt b/test-chat/obj/m20251004-015331_1_1_1/message_text.txt
new file mode 100644
index 00000000..214a2aca
--- /dev/null
+++ b/test-chat/obj/m20251004-015331_1_1_1/message_text.txt
@@ -0,0 +1,3 @@
+β‘ **Action 1/1** (Method ai.process.generate_prime_calculation_plan)
+
+π¬ Ich erstelle einen detaillierten Aktionsplan zur Berechnung und Validierung der ersten 1000 Primzahlen.
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-015335_1_1_0/message.json b/test-chat/obj/m20251004-015335_1_1_0/message.json
new file mode 100644
index 00000000..c24e622f
--- /dev/null
+++ b/test-chat/obj/m20251004-015335_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_f29b3c87-613e-492e-95e5-480e499ec697",
+ "workflowId": "5486d66b-c563-4b8b-a48d-f31a6df2dd7e",
+ "parentMessageId": null,
+ "message": "**Task 1**\n\nβ 'Calculate and validate first 1000 prime numbers' failed\n\nThe task execution failed due to an unknown method error, and no documents were produced. The primary objective of calculating the first 1000 prime numbers was not achieved.\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 5,
+ "publishedAt": 1759535615.1013582,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": null,
+ "actionId": null,
+ "actionMethod": "task",
+ "actionName": "task_error",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-015335_1_1_0/message_text.txt b/test-chat/obj/m20251004-015335_1_1_0/message_text.txt
new file mode 100644
index 00000000..bd25804a
--- /dev/null
+++ b/test-chat/obj/m20251004-015335_1_1_0/message_text.txt
@@ -0,0 +1,6 @@
+**Task 1**
+
+β 'Calculate and validate first 1000 prime numbers' failed
+
+The task execution failed due to an unknown method error, and no documents were produced. The primary objective of calculating the first 1000 prime numbers was not achieved.
+
diff --git a/test-chat/obj/m20251004-015335_1_2_0/message.json b/test-chat/obj/m20251004-015335_1_2_0/message.json
new file mode 100644
index 00000000..c1d3dfe4
--- /dev/null
+++ b/test-chat/obj/m20251004-015335_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_b88c2bc7-a385-4093-b8e1-3dcce01697c6",
+ "workflowId": "5486d66b-c563-4b8b-a48d-f31a6df2dd7e",
+ "parentMessageId": null,
+ "message": "π **Task 2/2**\n\nπ¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 6,
+ "publishedAt": 1759535615.3469894,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-015335_1_2_0/message_text.txt b/test-chat/obj/m20251004-015335_1_2_0/message_text.txt
new file mode 100644
index 00000000..7efb03e5
--- /dev/null
+++ b/test-chat/obj/m20251004-015335_1_2_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 2/2**
+
+π¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-015339_1_2_1/message.json b/test-chat/obj/m20251004-015339_1_2_1/message.json
new file mode 100644
index 00000000..be849e87
--- /dev/null
+++ b/test-chat/obj/m20251004-015339_1_2_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_849f8f5d-e48a-4868-96f2-632d7f7d1583",
+ "workflowId": "5486d66b-c563-4b8b-a48d-f31a6df2dd7e",
+ "parentMessageId": null,
+ "message": "β‘ **Action 1/1** (Method ai.process.generate_plan)\n\nπ¬ Ich erstelle einen detaillierten Aktionsplan, wie Sie ein Word-Dokument mit einer Liste von Primzahlen erstellen und formatieren kΓΆnnen.",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 7,
+ "publishedAt": 1759535619.5754921,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "documentsLabel": "action_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-015339_1_2_1/message_text.txt b/test-chat/obj/m20251004-015339_1_2_1/message_text.txt
new file mode 100644
index 00000000..4694f357
--- /dev/null
+++ b/test-chat/obj/m20251004-015339_1_2_1/message_text.txt
@@ -0,0 +1,3 @@
+β‘ **Action 1/1** (Method ai.process.generate_plan)
+
+π¬ Ich erstelle einen detaillierten Aktionsplan, wie Sie ein Word-Dokument mit einer Liste von Primzahlen erstellen und formatieren kΓΆnnen.
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-015342_1_0_0/message.json b/test-chat/obj/m20251004-015342_1_0_0/message.json
new file mode 100644
index 00000000..f9f11969
--- /dev/null
+++ b/test-chat/obj/m20251004-015342_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_5fbf72e0-e37b-48db-a1f6-82202b8800d7",
+ "workflowId": "5486d66b-c563-4b8b-a48d-f31a6df2dd7e",
+ "parentMessageId": null,
+ "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 7 responses.\nWorkflow status: running",
+ "role": "assistant",
+ "status": "last",
+ "sequenceNr": 9,
+ "publishedAt": 1759535622.6124883,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "workflow_feedback",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-015342_1_0_0/message_text.txt b/test-chat/obj/m20251004-015342_1_0_0/message_text.txt
new file mode 100644
index 00000000..828f4bea
--- /dev/null
+++ b/test-chat/obj/m20251004-015342_1_0_0/message_text.txt
@@ -0,0 +1,4 @@
+Workflow completed.
+
+Processed 1 user inputs and generated 7 responses.
+Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-015342_1_2_0/message.json b/test-chat/obj/m20251004-015342_1_2_0/message.json
new file mode 100644
index 00000000..f20deebd
--- /dev/null
+++ b/test-chat/obj/m20251004-015342_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_9e66e0b2-301d-453a-a12c-54f6ce4c9164",
+ "workflowId": "5486d66b-c563-4b8b-a48d-f31a6df2dd7e",
+ "parentMessageId": null,
+ "message": "**Task 2**\n\nβ 'Create and format Word document with prime numbers' failed\n\nThe task execution failed due to an unknown method error, and no documents were produced. The primary objective of creating and formatting a Word document with prime numbers was not achieved.\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 8,
+ "publishedAt": 1759535622.4475205,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": null,
+ "actionId": null,
+ "actionMethod": "task",
+ "actionName": "task_error",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-015342_1_2_0/message_text.txt b/test-chat/obj/m20251004-015342_1_2_0/message_text.txt
new file mode 100644
index 00000000..2632f1e4
--- /dev/null
+++ b/test-chat/obj/m20251004-015342_1_2_0/message_text.txt
@@ -0,0 +1,6 @@
+**Task 2**
+
+β 'Create and format Word document with prime numbers' failed
+
+The task execution failed due to an unknown method error, and no documents were produced. The primary objective of creating and formatting a Word document with prime numbers was not achieved.
+
diff --git a/test-chat/obj/m20251004-020301_1_0_0/message.json b/test-chat/obj/m20251004-020301_1_0_0/message.json
new file mode 100644
index 00000000..63e747b9
--- /dev/null
+++ b/test-chat/obj/m20251004-020301_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_3ace7569-05bf-4465-8456-ec2375294f73",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
+ "role": "user",
+ "status": "first",
+ "sequenceNr": 1,
+ "publishedAt": 1759536181.3827498,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "round1_task0_action0_context",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020301_1_0_0/message_text.txt b/test-chat/obj/m20251004-020301_1_0_0/message_text.txt
new file mode 100644
index 00000000..2486bd22
--- /dev/null
+++ b/test-chat/obj/m20251004-020301_1_0_0/message_text.txt
@@ -0,0 +1 @@
+Gib mir die ersten 1000 Primzahlen in einem word dokument aus
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020306_1_1_0/message.json b/test-chat/obj/m20251004-020306_1_1_0/message.json
new file mode 100644
index 00000000..59e49d79
--- /dev/null
+++ b/test-chat/obj/m20251004-020306_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_aced5123-75a6-48fd-bb1f-109f84edb6d5",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "π **Task 1/2**\n\nπ¬ Generating the list of the first 1000 prime numbers",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 3,
+ "publishedAt": 1759536186.6526492,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020306_1_1_0/message_text.txt b/test-chat/obj/m20251004-020306_1_1_0/message_text.txt
new file mode 100644
index 00000000..1e94ddff
--- /dev/null
+++ b/test-chat/obj/m20251004-020306_1_1_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 1/2**
+
+π¬ Generating the list of the first 1000 prime numbers
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020311_1_1_1/message.json b/test-chat/obj/m20251004-020311_1_1_1/message.json
new file mode 100644
index 00000000..c2c2a064
--- /dev/null
+++ b/test-chat/obj/m20251004-020311_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_87f6cbe3-c20c-4fdd-86e4-7274c2953a90",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "β‘ **Action 1/1** (Method ai.process)\n\nπ¬ Ich erstelle eine prΓ€zise Schritt-fΓΌr-Schritt-Anleitung zur Berechnung der ersten 1000 Primzahlen mit dem Sieb des Eratosthenes.",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 4,
+ "publishedAt": 1759536191.0318685,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "action_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020311_1_1_1/message_text.txt b/test-chat/obj/m20251004-020311_1_1_1/message_text.txt
new file mode 100644
index 00000000..552f2f82
--- /dev/null
+++ b/test-chat/obj/m20251004-020311_1_1_1/message_text.txt
@@ -0,0 +1,3 @@
+β‘ **Action 1/1** (Method ai.process)
+
+π¬ Ich erstelle eine prΓ€zise Schritt-fΓΌr-Schritt-Anleitung zur Berechnung der ersten 1000 Primzahlen mit dem Sieb des Eratosthenes.
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020321_1_1_1/message.json b/test-chat/obj/m20251004-020321_1_1_1/message.json
new file mode 100644
index 00000000..e7d55437
--- /dev/null
+++ b/test-chat/obj/m20251004-020321_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_43935c44-910e-4e8a-b5b2-60a76102fed6",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (ai.process)**\n\nβ
Calculate and generate list of first 1000 prime numbers\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 5,
+ "publishedAt": 1759536201.504747,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task1_action1_prime_generation_steps",
+ "actionId": "action_f2269b53-13bd-405f-b2bc-2bdf6005b0a5",
+ "actionMethod": "ai",
+ "actionName": "process",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020321_1_1_1/message_text.txt b/test-chat/obj/m20251004-020321_1_1_1/message_text.txt
new file mode 100644
index 00000000..7f6ea7d7
--- /dev/null
+++ b/test-chat/obj/m20251004-020321_1_1_1/message_text.txt
@@ -0,0 +1,4 @@
+**Action 1/1 (ai.process)**
+
+β
Calculate and generate list of first 1000 prime numbers
+
diff --git a/test-chat/obj/m20251004-020321_1_1_1/round1_task1_action1_prime_generation_steps/document_001_metadata.json b/test-chat/obj/m20251004-020321_1_1_1/round1_task1_action1_prime_generation_steps/document_001_metadata.json
new file mode 100644
index 00000000..be60e972
--- /dev/null
+++ b/test-chat/obj/m20251004-020321_1_1_1/round1_task1_action1_prime_generation_steps/document_001_metadata.json
@@ -0,0 +1,12 @@
+{
+ "id": "158b67f3-2a1b-415d-9afb-3f80f2b63939",
+ "messageId": "msg_43935c44-910e-4e8a-b5b2-60a76102fed6",
+ "fileId": "5e3dc634-24e4-42cd-b4d4-22a80521b4f6",
+ "fileName": "ai_result_r0t0a0_91.txt",
+ "fileSize": 2109,
+ "mimeType": "text/plain",
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "actionId": "action_f2269b53-13bd-405f-b2bc-2bdf6005b0a5"
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020324_1_1_0/message.json b/test-chat/obj/m20251004-020324_1_1_0/message.json
new file mode 100644
index 00000000..440d63a6
--- /dev/null
+++ b/test-chat/obj/m20251004-020324_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_367c3c04-3282-460a-b644-c72db485754f",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "π― **Task 1/2**\n\nβ
The task was executed successfully, producing the required document with the list of the first 1000 prime numbers. The document is assumed to be complete and of good quality based on the success status.\nβ’ action success\nβ’ document produced\nβ’ document completeness\nπ Score 9/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 6,
+ "publishedAt": 1759536203.9659646,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020324_1_1_0/message_text.txt b/test-chat/obj/m20251004-020324_1_1_0/message_text.txt
new file mode 100644
index 00000000..5055b8a2
--- /dev/null
+++ b/test-chat/obj/m20251004-020324_1_1_0/message_text.txt
@@ -0,0 +1,7 @@
+π― **Task 1/2**
+
+β
The task was executed successfully, producing the required document with the list of the first 1000 prime numbers. The document is assumed to be complete and of good quality based on the success status.
+β’ action success
+β’ document produced
+β’ document completeness
+π Score 9/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020324_1_2_0/message.json b/test-chat/obj/m20251004-020324_1_2_0/message.json
new file mode 100644
index 00000000..b7a97abb
--- /dev/null
+++ b/test-chat/obj/m20251004-020324_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_e486d585-846e-473f-9faf-106c21fe26f2",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "π **Task 2/2**\n\nπ¬ Creating a well-formatted Word document with the prime numbers",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 7,
+ "publishedAt": 1759536204.3357615,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020324_1_2_0/message_text.txt b/test-chat/obj/m20251004-020324_1_2_0/message_text.txt
new file mode 100644
index 00000000..24b54f65
--- /dev/null
+++ b/test-chat/obj/m20251004-020324_1_2_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 2/2**
+
+π¬ Creating a well-formatted Word document with the prime numbers
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020329_1_2_1/message.json b/test-chat/obj/m20251004-020329_1_2_1/message.json
new file mode 100644
index 00000000..b0991041
--- /dev/null
+++ b/test-chat/obj/m20251004-020329_1_2_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_1c33908d-db16-4095-8be2-d20e14533fe1",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "β‘ **Action 1/2** (Method ai.process)\n\nπ¬ Ich generiere eine Liste der Primzahlen bis zu einer von Ihnen angegebenen Zahl.",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 8,
+ "publishedAt": 1759536209.1543186,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "documentsLabel": "action_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020329_1_2_1/message_text.txt b/test-chat/obj/m20251004-020329_1_2_1/message_text.txt
new file mode 100644
index 00000000..31821c6f
--- /dev/null
+++ b/test-chat/obj/m20251004-020329_1_2_1/message_text.txt
@@ -0,0 +1,3 @@
+β‘ **Action 1/2** (Method ai.process)
+
+π¬ Ich generiere eine Liste der Primzahlen bis zu einer von Ihnen angegebenen Zahl.
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020335_1_1_1/message.json b/test-chat/obj/m20251004-020335_1_1_1/message.json
new file mode 100644
index 00000000..b85e95bb
--- /dev/null
+++ b/test-chat/obj/m20251004-020335_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_baf431cd-7fa2-45cd-847c-1a0244f7d8c4",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "**Action 1/2 (ai.process)**\n\nβ
Create and format Word document containing the prime numbers\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 9,
+ "publishedAt": 1759536215.6262813,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task1_action1_generate_prime_list",
+ "actionId": "action_1f3ae944-98c3-4a29-8ffd-dab163c0a817",
+ "actionMethod": "ai",
+ "actionName": "process",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020335_1_1_1/message_text.txt b/test-chat/obj/m20251004-020335_1_1_1/message_text.txt
new file mode 100644
index 00000000..0b5c80f7
--- /dev/null
+++ b/test-chat/obj/m20251004-020335_1_1_1/message_text.txt
@@ -0,0 +1,4 @@
+**Action 1/2 (ai.process)**
+
+β
Create and format Word document containing the prime numbers
+
diff --git a/test-chat/obj/m20251004-020335_1_1_1/round1_task1_action1_generate_prime_list/document_001_metadata.json b/test-chat/obj/m20251004-020335_1_1_1/round1_task1_action1_generate_prime_list/document_001_metadata.json
new file mode 100644
index 00000000..164810bf
--- /dev/null
+++ b/test-chat/obj/m20251004-020335_1_1_1/round1_task1_action1_generate_prime_list/document_001_metadata.json
@@ -0,0 +1,12 @@
+{
+ "id": "f882f19f-524c-4133-ada8-0702420cb459",
+ "messageId": "msg_baf431cd-7fa2-45cd-847c-1a0244f7d8c4",
+ "fileId": "37af5e30-0cee-4950-8a40-d075a87cfc8b",
+ "fileName": "ai_result_r0t0a0_92.txt",
+ "fileSize": 56,
+ "mimeType": "text/plain",
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "actionId": "action_1f3ae944-98c3-4a29-8ffd-dab163c0a817"
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020335_1_2_2/message.json b/test-chat/obj/m20251004-020335_1_2_2/message.json
new file mode 100644
index 00000000..d63c37b6
--- /dev/null
+++ b/test-chat/obj/m20251004-020335_1_2_2/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_4f0fd97a-50bd-4ee3-bab3-52a91de181f6",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "β‘ **Action 2/2** (Method document.generate)\n\nπ¬ Ich erstelle ein Word-Dokument mit der Liste der Primzahlen.",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 10,
+ "publishedAt": 1759536215.8777142,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 2,
+ "documentsLabel": "action_2_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020335_1_2_2/message_text.txt b/test-chat/obj/m20251004-020335_1_2_2/message_text.txt
new file mode 100644
index 00000000..6ecd9aa7
--- /dev/null
+++ b/test-chat/obj/m20251004-020335_1_2_2/message_text.txt
@@ -0,0 +1,3 @@
+β‘ **Action 2/2** (Method document.generate)
+
+π¬ Ich erstelle ein Word-Dokument mit der Liste der Primzahlen.
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020336_1_1_2/message.json b/test-chat/obj/m20251004-020336_1_1_2/message.json
new file mode 100644
index 00000000..5767f19b
--- /dev/null
+++ b/test-chat/obj/m20251004-020336_1_1_2/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_4265ae9b-2daf-4e6d-894d-c73fb5ffae7e",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "**Action 2/2 (document.generate)**\n\nβ Create and format Word document containing the prime numbers\n\nDocument list reference is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 11,
+ "publishedAt": 1759536216.0985827,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 2,
+ "documentsLabel": "round1_task1_action2_generate_word_document",
+ "actionId": "action_041d81d0-4b7d-4fa9-99ea-080ad6e8c17b",
+ "actionMethod": "document",
+ "actionName": "generate",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020336_1_1_2/message_text.txt b/test-chat/obj/m20251004-020336_1_1_2/message_text.txt
new file mode 100644
index 00000000..3b6fa84d
--- /dev/null
+++ b/test-chat/obj/m20251004-020336_1_1_2/message_text.txt
@@ -0,0 +1,6 @@
+**Action 2/2 (document.generate)**
+
+β Create and format Word document containing the prime numbers
+
+Document list reference is required
+
diff --git a/test-chat/obj/m20251004-020338_1_0_0/message.json b/test-chat/obj/m20251004-020338_1_0_0/message.json
new file mode 100644
index 00000000..e75f3fef
--- /dev/null
+++ b/test-chat/obj/m20251004-020338_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_61436fb3-8d8f-4b70-b342-f70c9e89dab6",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 11 responses.\nWorkflow status: running",
+ "role": "assistant",
+ "status": "last",
+ "sequenceNr": 13,
+ "publishedAt": 1759536218.8041105,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "workflow_feedback",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020338_1_0_0/message_text.txt b/test-chat/obj/m20251004-020338_1_0_0/message_text.txt
new file mode 100644
index 00000000..7000823d
--- /dev/null
+++ b/test-chat/obj/m20251004-020338_1_0_0/message_text.txt
@@ -0,0 +1,4 @@
+Workflow completed.
+
+Processed 1 user inputs and generated 11 responses.
+Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020338_1_2_0/message.json b/test-chat/obj/m20251004-020338_1_2_0/message.json
new file mode 100644
index 00000000..bd4933e9
--- /dev/null
+++ b/test-chat/obj/m20251004-020338_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_b8e63469-66a8-4744-b86b-a3be95ef995e",
+ "workflowId": "99b9781a-7780-4407-b321-6b336efd8e5e",
+ "parentMessageId": null,
+ "message": "**Task 2**\n\nβ 'Create and format Word document containing the prime numbers' failed\n\nThe task objective was to create and format a Word document containing prime numbers, but the output was a plain text file instead. Additionally, the second execution failed due to a missing document list reference.\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 12,
+ "publishedAt": 1759536218.492873,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": null,
+ "actionId": null,
+ "actionMethod": "task",
+ "actionName": "task_error",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-020338_1_2_0/message_text.txt b/test-chat/obj/m20251004-020338_1_2_0/message_text.txt
new file mode 100644
index 00000000..5e656c93
--- /dev/null
+++ b/test-chat/obj/m20251004-020338_1_2_0/message_text.txt
@@ -0,0 +1,6 @@
+**Task 2**
+
+β 'Create and format Word document containing the prime numbers' failed
+
+The task objective was to create and format a Word document containing prime numbers, but the output was a plain text file instead. Additionally, the second execution failed due to a missing document list reference.
+
diff --git a/test-chat/obj/m20251004-021441_1_0_0/message.json b/test-chat/obj/m20251004-021441_1_0_0/message.json
new file mode 100644
index 00000000..adbed6a0
--- /dev/null
+++ b/test-chat/obj/m20251004-021441_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_9429aba5-dabf-4592-ae3a-3698a3fc6d1c",
+ "workflowId": "7fb8ec7f-d9fa-4309-82bc-47f27eb9ffb6",
+ "parentMessageId": null,
+ "message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
+ "role": "user",
+ "status": "first",
+ "sequenceNr": 1,
+ "publishedAt": 1759536881.7816095,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "round1_task0_action0_context",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021441_1_0_0/message_text.txt b/test-chat/obj/m20251004-021441_1_0_0/message_text.txt
new file mode 100644
index 00000000..2486bd22
--- /dev/null
+++ b/test-chat/obj/m20251004-021441_1_0_0/message_text.txt
@@ -0,0 +1 @@
+Gib mir die ersten 1000 Primzahlen in einem word dokument aus
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021446_1_1_0/message.json b/test-chat/obj/m20251004-021446_1_1_0/message.json
new file mode 100644
index 00000000..195c0950
--- /dev/null
+++ b/test-chat/obj/m20251004-021446_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_11d84d32-70b9-455c-b068-7d6c20ee2245",
+ "workflowId": "7fb8ec7f-d9fa-4309-82bc-47f27eb9ffb6",
+ "parentMessageId": null,
+ "message": "π **Task Plan**\n\nIch werde die ersten 1000 Primzahlen berechnen und in einem formatierten Word-Dokument fΓΌr Sie ausgeben.\n\nπ¬ Berechne die ersten 1000 Primzahlen\n\nπ¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 2,
+ "publishedAt": 1759536886.909841,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_plan",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021446_1_1_0/message_text.txt b/test-chat/obj/m20251004-021446_1_1_0/message_text.txt
new file mode 100644
index 00000000..93739fb0
--- /dev/null
+++ b/test-chat/obj/m20251004-021446_1_1_0/message_text.txt
@@ -0,0 +1,8 @@
+π **Task Plan**
+
+Ich werde die ersten 1000 Primzahlen berechnen und in einem formatierten Word-Dokument fΓΌr Sie ausgeben.
+
+π¬ Berechne die ersten 1000 Primzahlen
+
+π¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen
+
diff --git a/test-chat/obj/m20251004-021447_1_1_0/message.json b/test-chat/obj/m20251004-021447_1_1_0/message.json
new file mode 100644
index 00000000..3198d253
--- /dev/null
+++ b/test-chat/obj/m20251004-021447_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_a9794603-fcca-4ac0-a65c-77eb5a14a79d",
+ "workflowId": "7fb8ec7f-d9fa-4309-82bc-47f27eb9ffb6",
+ "parentMessageId": null,
+ "message": "π **Task 1/2**\n\nπ¬ Berechne die ersten 1000 Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 3,
+ "publishedAt": 1759536887.055933,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021447_1_1_0/message_text.txt b/test-chat/obj/m20251004-021447_1_1_0/message_text.txt
new file mode 100644
index 00000000..c4301ba7
--- /dev/null
+++ b/test-chat/obj/m20251004-021447_1_1_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 1/2**
+
+π¬ Berechne die ersten 1000 Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021451_1_1_1/message.json b/test-chat/obj/m20251004-021451_1_1_1/message.json
new file mode 100644
index 00000000..83db8600
--- /dev/null
+++ b/test-chat/obj/m20251004-021451_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_a25613ed-11c0-4770-ac92-3176e069b83c",
+ "workflowId": "7fb8ec7f-d9fa-4309-82bc-47f27eb9ffb6",
+ "parentMessageId": null,
+ "message": "β‘ **Action 1/1** (Method ai.process)\n\nπ¬ Ich generiere eine Python-Funktion, die mit dem Sieb des Eratosthenes die ersten 1000 Primzahlen berechnet.",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 4,
+ "publishedAt": 1759536891.1863303,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "action_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021451_1_1_1/message_text.txt b/test-chat/obj/m20251004-021451_1_1_1/message_text.txt
new file mode 100644
index 00000000..03cbde5d
--- /dev/null
+++ b/test-chat/obj/m20251004-021451_1_1_1/message_text.txt
@@ -0,0 +1,3 @@
+β‘ **Action 1/1** (Method ai.process)
+
+π¬ Ich generiere eine Python-Funktion, die mit dem Sieb des Eratosthenes die ersten 1000 Primzahlen berechnet.
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021457_1_1_1/message.json b/test-chat/obj/m20251004-021457_1_1_1/message.json
new file mode 100644
index 00000000..36258182
--- /dev/null
+++ b/test-chat/obj/m20251004-021457_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_67df064a-d7e5-4ed0-836f-bb6f77b8b43e",
+ "workflowId": "7fb8ec7f-d9fa-4309-82bc-47f27eb9ffb6",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (ai.process)**\n\nβ
Calculate first 1000 prime numbers\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 5,
+ "publishedAt": 1759536897.3372462,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task1_action1_generate_prime_sieve_function",
+ "actionId": "action_7e9a034f-099c-4fe7-8def-dc627282ec9c",
+ "actionMethod": "ai",
+ "actionName": "process",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021457_1_1_1/message_text.txt b/test-chat/obj/m20251004-021457_1_1_1/message_text.txt
new file mode 100644
index 00000000..2618a7a1
--- /dev/null
+++ b/test-chat/obj/m20251004-021457_1_1_1/message_text.txt
@@ -0,0 +1,4 @@
+**Action 1/1 (ai.process)**
+
+β
Calculate first 1000 prime numbers
+
diff --git a/test-chat/obj/m20251004-021457_1_1_1/round1_task1_action1_generate_prime_sieve_function/document_001_metadata.json b/test-chat/obj/m20251004-021457_1_1_1/round1_task1_action1_generate_prime_sieve_function/document_001_metadata.json
new file mode 100644
index 00000000..986390ab
--- /dev/null
+++ b/test-chat/obj/m20251004-021457_1_1_1/round1_task1_action1_generate_prime_sieve_function/document_001_metadata.json
@@ -0,0 +1,12 @@
+{
+ "id": "ef3d8b62-de43-40ae-8d97-70e00e4f95c5",
+ "messageId": "msg_67df064a-d7e5-4ed0-836f-bb6f77b8b43e",
+ "fileId": "52a263a1-d4c7-4cfb-b4dc-7239091c327e",
+ "fileName": "ai_result_r0t0a0_93.txt",
+ "fileSize": 1026,
+ "mimeType": "text/plain",
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "actionId": "action_7e9a034f-099c-4fe7-8def-dc627282ec9c"
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021500_1_1_0/message.json b/test-chat/obj/m20251004-021500_1_1_0/message.json
new file mode 100644
index 00000000..db772513
--- /dev/null
+++ b/test-chat/obj/m20251004-021500_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_b9ea0ba5-ce00-46b6-911c-b46fbbdf0360",
+ "workflowId": "7fb8ec7f-d9fa-4309-82bc-47f27eb9ffb6",
+ "parentMessageId": null,
+ "message": "π― **Task 1/2**\n\nβ
The task was executed successfully, producing the required document with the calculated prime numbers. The document appears to be complete and of good quality.\nβ’ Review each action's success/failure status\nβ’ Check if required documents were produced\nβ’ Validate document quality and completeness\nβ’ Assess if success criteria were met\nπ Score 9/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 6,
+ "publishedAt": 1759536900.503364,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021500_1_1_0/message_text.txt b/test-chat/obj/m20251004-021500_1_1_0/message_text.txt
new file mode 100644
index 00000000..9690d3e7
--- /dev/null
+++ b/test-chat/obj/m20251004-021500_1_1_0/message_text.txt
@@ -0,0 +1,8 @@
+π― **Task 1/2**
+
+β
The task was executed successfully, producing the required document with the calculated prime numbers. The document appears to be complete and of good quality.
+β’ Review each action's success/failure status
+β’ Check if required documents were produced
+β’ Validate document quality and completeness
+β’ Assess if success criteria were met
+π Score 9/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021500_1_2_0/message.json b/test-chat/obj/m20251004-021500_1_2_0/message.json
new file mode 100644
index 00000000..0fbb9273
--- /dev/null
+++ b/test-chat/obj/m20251004-021500_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_6f332064-dfb2-4c8b-9d2a-a7a08865c665",
+ "workflowId": "7fb8ec7f-d9fa-4309-82bc-47f27eb9ffb6",
+ "parentMessageId": null,
+ "message": "π **Task 2/2**\n\nπ¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 7,
+ "publishedAt": 1759536900.7652051,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021500_1_2_0/message_text.txt b/test-chat/obj/m20251004-021500_1_2_0/message_text.txt
new file mode 100644
index 00000000..7efb03e5
--- /dev/null
+++ b/test-chat/obj/m20251004-021500_1_2_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 2/2**
+
+π¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021504_1_2_1/message.json b/test-chat/obj/m20251004-021504_1_2_1/message.json
new file mode 100644
index 00000000..5511eacd
--- /dev/null
+++ b/test-chat/obj/m20251004-021504_1_2_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_c8326807-a8ee-4f21-a6e1-d132e50f6796",
+ "workflowId": "7fb8ec7f-d9fa-4309-82bc-47f27eb9ffb6",
+ "parentMessageId": null,
+ "message": "β‘ **Action 1/1** (Method ai.process)\n\nπ¬ Ich generiere eine Liste von Primzahlen zwischen 1 und 100, die im Word-Dokument verwendet werden kann.",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 8,
+ "publishedAt": 1759536904.264426,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "documentsLabel": "action_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021504_1_2_1/message_text.txt b/test-chat/obj/m20251004-021504_1_2_1/message_text.txt
new file mode 100644
index 00000000..47482be6
--- /dev/null
+++ b/test-chat/obj/m20251004-021504_1_2_1/message_text.txt
@@ -0,0 +1,3 @@
+β‘ **Action 1/1** (Method ai.process)
+
+π¬ Ich generiere eine Liste von Primzahlen zwischen 1 und 100, die im Word-Dokument verwendet werden kann.
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021507_1_1_1/message.json b/test-chat/obj/m20251004-021507_1_1_1/message.json
new file mode 100644
index 00000000..781aff91
--- /dev/null
+++ b/test-chat/obj/m20251004-021507_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_25a72c60-b658-43c3-88c1-1b65e8f12451",
+ "workflowId": "7fb8ec7f-d9fa-4309-82bc-47f27eb9ffb6",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (ai.process)**\n\nβ
Create and format Word document with prime numbers\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 9,
+ "publishedAt": 1759536907.8752341,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task1_action1_generate_prime_numbers",
+ "actionId": "action_f79702af-728b-4562-8278-9ab9364b212c",
+ "actionMethod": "ai",
+ "actionName": "process",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220853_1_2_1/message_text.txt b/test-chat/obj/m20251004-021507_1_1_1/message_text.txt
similarity index 100%
rename from test-chat/obj/m20251003-220853_1_2_1/message_text.txt
rename to test-chat/obj/m20251004-021507_1_1_1/message_text.txt
diff --git a/test-chat/obj/m20251004-021507_1_1_1/round1_task1_action1_generate_prime_numbers/document_001_metadata.json b/test-chat/obj/m20251004-021507_1_1_1/round1_task1_action1_generate_prime_numbers/document_001_metadata.json
new file mode 100644
index 00000000..6c9c882d
--- /dev/null
+++ b/test-chat/obj/m20251004-021507_1_1_1/round1_task1_action1_generate_prime_numbers/document_001_metadata.json
@@ -0,0 +1,12 @@
+{
+ "id": "4521d52b-9bab-4b44-9bb9-1532bb2dc271",
+ "messageId": "msg_25a72c60-b658-43c3-88c1-1b65e8f12451",
+ "fileId": "3a896ad3-68d8-4f35-9bf7-dc290defdf52",
+ "fileName": "ai_result_r0t0a0_94.txt",
+ "fileSize": 94,
+ "mimeType": "text/plain",
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "actionId": "action_f79702af-728b-4562-8278-9ab9364b212c"
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220907_1_0_0/message.json b/test-chat/obj/m20251004-021511_1_0_0/message.json
similarity index 64%
rename from test-chat/obj/m20251003-220907_1_0_0/message.json
rename to test-chat/obj/m20251004-021511_1_0_0/message.json
index 0eadb060..cc3f0462 100644
--- a/test-chat/obj/m20251003-220907_1_0_0/message.json
+++ b/test-chat/obj/m20251004-021511_1_0_0/message.json
@@ -1,12 +1,12 @@
{
- "id": "msg_5a285839-6b7c-48a0-811f-c35439afa498",
- "workflowId": "4886a461-687b-4980-853d-91251f9424ac",
+ "id": "msg_346c5dcb-5a78-4a6d-a2c2-f72a422df78a",
+ "workflowId": "7fb8ec7f-d9fa-4309-82bc-47f27eb9ffb6",
"parentMessageId": null,
- "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 6 responses.\nWorkflow status: running",
+ "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 9 responses.\nWorkflow status: running",
"role": "assistant",
"status": "last",
"sequenceNr": 11,
- "publishedAt": 1759522146.9623349,
+ "publishedAt": 1759536911.3753736,
"roundNumber": 1,
"taskNumber": 0,
"actionNumber": 0,
diff --git a/test-chat/obj/m20251004-021511_1_0_0/message_text.txt b/test-chat/obj/m20251004-021511_1_0_0/message_text.txt
new file mode 100644
index 00000000..f7827e0a
--- /dev/null
+++ b/test-chat/obj/m20251004-021511_1_0_0/message_text.txt
@@ -0,0 +1,4 @@
+Workflow completed.
+
+Processed 1 user inputs and generated 9 responses.
+Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021511_1_2_0/message.json b/test-chat/obj/m20251004-021511_1_2_0/message.json
new file mode 100644
index 00000000..7630115b
--- /dev/null
+++ b/test-chat/obj/m20251004-021511_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_695af4a9-45d8-443c-bf55-bf0aa031ee1f",
+ "workflowId": "7fb8ec7f-d9fa-4309-82bc-47f27eb9ffb6",
+ "parentMessageId": null,
+ "message": "**Task 2**\n\nβ 'Create and format Word document with prime numbers' failed\n\nThe task was to create and format a Word document with prime numbers, but the result was a text file instead of a Word document. This does not meet the task objective.\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 10,
+ "publishedAt": 1759536911.2106004,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": null,
+ "actionId": null,
+ "actionMethod": "task",
+ "actionName": "task_error",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021511_1_2_0/message_text.txt b/test-chat/obj/m20251004-021511_1_2_0/message_text.txt
new file mode 100644
index 00000000..ad89aeb1
--- /dev/null
+++ b/test-chat/obj/m20251004-021511_1_2_0/message_text.txt
@@ -0,0 +1,6 @@
+**Task 2**
+
+β 'Create and format Word document with prime numbers' failed
+
+The task was to create and format a Word document with prime numbers, but the result was a text file instead of a Word document. This does not meet the task objective.
+
diff --git a/test-chat/obj/m20251004-021746_1_0_0/message.json b/test-chat/obj/m20251004-021746_1_0_0/message.json
new file mode 100644
index 00000000..6f318084
--- /dev/null
+++ b/test-chat/obj/m20251004-021746_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_5ccd1186-d964-4832-af18-0deb14af15ef",
+ "workflowId": "cf6ced75-9c01-44c2-90d4-ce023252ae31",
+ "parentMessageId": null,
+ "message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
+ "role": "user",
+ "status": "first",
+ "sequenceNr": 1,
+ "publishedAt": 1759537066.4937792,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "round1_task0_action0_context",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021746_1_0_0/message_text.txt b/test-chat/obj/m20251004-021746_1_0_0/message_text.txt
new file mode 100644
index 00000000..2486bd22
--- /dev/null
+++ b/test-chat/obj/m20251004-021746_1_0_0/message_text.txt
@@ -0,0 +1 @@
+Gib mir die ersten 1000 Primzahlen in einem word dokument aus
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021751_1_1_0/message.json b/test-chat/obj/m20251004-021751_1_1_0/message.json
new file mode 100644
index 00000000..10f8e0c2
--- /dev/null
+++ b/test-chat/obj/m20251004-021751_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_19f2d335-4ff3-4ee5-a8ee-97943d8f6286",
+ "workflowId": "cf6ced75-9c01-44c2-90d4-ce023252ae31",
+ "parentMessageId": null,
+ "message": "π **Task 1/2**\n\nπ¬ Berechne die ersten 1000 Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 3,
+ "publishedAt": 1759537071.8939424,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021751_1_1_0/message_text.txt b/test-chat/obj/m20251004-021751_1_1_0/message_text.txt
new file mode 100644
index 00000000..c4301ba7
--- /dev/null
+++ b/test-chat/obj/m20251004-021751_1_1_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 1/2**
+
+π¬ Berechne die ersten 1000 Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021801_1_1_0/message.json b/test-chat/obj/m20251004-021801_1_1_0/message.json
new file mode 100644
index 00000000..d9046d8d
--- /dev/null
+++ b/test-chat/obj/m20251004-021801_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_63376b49-f81f-4f9b-b945-a0992232ca7f",
+ "workflowId": "cf6ced75-9c01-44c2-90d4-ce023252ae31",
+ "parentMessageId": null,
+ "message": "π― **Task 1/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 5,
+ "publishedAt": 1759537081.0951383,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021801_1_1_0/message_text.txt b/test-chat/obj/m20251004-021801_1_1_0/message_text.txt
new file mode 100644
index 00000000..ef9599dd
--- /dev/null
+++ b/test-chat/obj/m20251004-021801_1_1_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 1/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021801_1_1_1/message.json b/test-chat/obj/m20251004-021801_1_1_1/message.json
new file mode 100644
index 00000000..e64a20cb
--- /dev/null
+++ b/test-chat/obj/m20251004-021801_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_da3b0649-f5f0-4fc7-bcb7-1e77cfd80fa9",
+ "workflowId": "cf6ced75-9c01-44c2-90d4-ce023252ae31",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (ai.process)**\n\nβ
Calculate and validate first 1000 prime numbers\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 4,
+ "publishedAt": 1759537081.034544,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task1_action1_results",
+ "actionId": "action_2d29be22-aa3a-4d7a-91b0-93766860b180",
+ "actionMethod": "ai",
+ "actionName": "process",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021801_1_1_1/message_text.txt b/test-chat/obj/m20251004-021801_1_1_1/message_text.txt
new file mode 100644
index 00000000..5901cd49
--- /dev/null
+++ b/test-chat/obj/m20251004-021801_1_1_1/message_text.txt
@@ -0,0 +1,4 @@
+**Action 1/1 (ai.process)**
+
+β
Calculate and validate first 1000 prime numbers
+
diff --git a/test-chat/obj/m20251004-021801_1_1_1/round1_task1_action1_results/document_001_metadata.json b/test-chat/obj/m20251004-021801_1_1_1/round1_task1_action1_results/document_001_metadata.json
new file mode 100644
index 00000000..ce974a7a
--- /dev/null
+++ b/test-chat/obj/m20251004-021801_1_1_1/round1_task1_action1_results/document_001_metadata.json
@@ -0,0 +1,12 @@
+{
+ "id": "6e57972c-18be-476e-aabc-2f608552cc65",
+ "messageId": "msg_da3b0649-f5f0-4fc7-bcb7-1e77cfd80fa9",
+ "fileId": "cf7ef904-7c3d-49a7-9efc-7d4edc8d1cff",
+ "fileName": "ai_result_r0t0a0_95.txt",
+ "fileSize": 929,
+ "mimeType": "text/plain",
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "actionId": "action_2d29be22-aa3a-4d7a-91b0-93766860b180"
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021801_1_2_0/message.json b/test-chat/obj/m20251004-021801_1_2_0/message.json
new file mode 100644
index 00000000..234d5b49
--- /dev/null
+++ b/test-chat/obj/m20251004-021801_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_7d2ce8b5-9aaa-4298-8a50-ba6f53ef9559",
+ "workflowId": "cf6ced75-9c01-44c2-90d4-ce023252ae31",
+ "parentMessageId": null,
+ "message": "π **Task 2/2**\n\nπ¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 6,
+ "publishedAt": 1759537081.3185987,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021801_1_2_0/message_text.txt b/test-chat/obj/m20251004-021801_1_2_0/message_text.txt
new file mode 100644
index 00000000..7efb03e5
--- /dev/null
+++ b/test-chat/obj/m20251004-021801_1_2_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 2/2**
+
+π¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021811_1_0_0/message.json b/test-chat/obj/m20251004-021811_1_0_0/message.json
new file mode 100644
index 00000000..61392aaa
--- /dev/null
+++ b/test-chat/obj/m20251004-021811_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_1dc3b6fa-f6c7-4887-943d-17e53d5248e9",
+ "workflowId": "cf6ced75-9c01-44c2-90d4-ce023252ae31",
+ "parentMessageId": null,
+ "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 7 responses.\nWorkflow status: running",
+ "role": "assistant",
+ "status": "last",
+ "sequenceNr": 9,
+ "publishedAt": 1759537091.437507,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "workflow_feedback",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021811_1_0_0/message_text.txt b/test-chat/obj/m20251004-021811_1_0_0/message_text.txt
new file mode 100644
index 00000000..828f4bea
--- /dev/null
+++ b/test-chat/obj/m20251004-021811_1_0_0/message_text.txt
@@ -0,0 +1,4 @@
+Workflow completed.
+
+Processed 1 user inputs and generated 7 responses.
+Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021811_1_2_0/message.json b/test-chat/obj/m20251004-021811_1_2_0/message.json
new file mode 100644
index 00000000..c421c51d
--- /dev/null
+++ b/test-chat/obj/m20251004-021811_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_7c47a538-e7c1-485d-a89f-c666eb86e2d5",
+ "workflowId": "cf6ced75-9c01-44c2-90d4-ce023252ae31",
+ "parentMessageId": null,
+ "message": "π― **Task 2/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 8,
+ "publishedAt": 1759537091.2906082,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-021811_1_2_0/message_text.txt b/test-chat/obj/m20251004-021811_1_2_0/message_text.txt
new file mode 100644
index 00000000..e46b0f2c
--- /dev/null
+++ b/test-chat/obj/m20251004-021811_1_2_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 2/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220853_1_2_1/message.json b/test-chat/obj/m20251004-021811_1_2_1/message.json
similarity index 65%
rename from test-chat/obj/m20251003-220853_1_2_1/message.json
rename to test-chat/obj/m20251004-021811_1_2_1/message.json
index 7b84b9bb..7921677c 100644
--- a/test-chat/obj/m20251003-220853_1_2_1/message.json
+++ b/test-chat/obj/m20251004-021811_1_2_1/message.json
@@ -1,17 +1,17 @@
{
- "id": "msg_08fa0763-a33c-4ad2-81f8-e943354dc4e5",
- "workflowId": "4886a461-687b-4980-853d-91251f9424ac",
+ "id": "msg_0f99bdda-800b-482f-9925-2c4f0718b47e",
+ "workflowId": "cf6ced75-9c01-44c2-90d4-ce023252ae31",
"parentMessageId": null,
"message": "**Action 1/1 (ai.process)**\n\nβ
Create and format Word document with prime numbers\n\n",
"role": "assistant",
"status": "step",
"sequenceNr": 7,
- "publishedAt": 1759522133.1566048,
+ "publishedAt": 1759537091.2123017,
"roundNumber": 1,
"taskNumber": 2,
"actionNumber": 1,
"documentsLabel": "round1_task2_action1_results",
- "actionId": "action_ea265db5-c27e-43bc-8667-182369622318",
+ "actionId": "action_120ba2a0-72b4-43a3-b419-f4eb042d84ec",
"actionMethod": "ai",
"actionName": "process",
"success": null,
diff --git a/test-chat/obj/m20251003-220904_1_2_2/message_text.txt b/test-chat/obj/m20251004-021811_1_2_1/message_text.txt
similarity index 67%
rename from test-chat/obj/m20251003-220904_1_2_2/message_text.txt
rename to test-chat/obj/m20251004-021811_1_2_1/message_text.txt
index a650c446..87daba79 100644
--- a/test-chat/obj/m20251003-220904_1_2_2/message_text.txt
+++ b/test-chat/obj/m20251004-021811_1_2_1/message_text.txt
@@ -1,4 +1,4 @@
-**Action 2/1 (ai.process)**
+**Action 1/1 (ai.process)**
β
Create and format Word document with prime numbers
diff --git a/test-chat/obj/m20251004-021811_1_2_1/round1_task2_action1_results/document_001_metadata.json b/test-chat/obj/m20251004-021811_1_2_1/round1_task2_action1_results/document_001_metadata.json
new file mode 100644
index 00000000..d1469ac7
--- /dev/null
+++ b/test-chat/obj/m20251004-021811_1_2_1/round1_task2_action1_results/document_001_metadata.json
@@ -0,0 +1,12 @@
+{
+ "id": "07d2bedc-7f8a-4a71-bdc5-b52fa96e36c3",
+ "messageId": "msg_0f99bdda-800b-482f-9925-2c4f0718b47e",
+ "fileId": "b931132d-454b-44ea-82e2-01450637416d",
+ "fileName": "ai_result_r0t0a0_96.txt",
+ "fileSize": 284,
+ "mimeType": "text/plain",
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "actionId": "action_120ba2a0-72b4-43a3-b419-f4eb042d84ec"
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-022428_1_0_0/message.json b/test-chat/obj/m20251004-022428_1_0_0/message.json
new file mode 100644
index 00000000..6a8240d1
--- /dev/null
+++ b/test-chat/obj/m20251004-022428_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_8e0da102-93b9-4cb5-93dc-41d82da1587e",
+ "workflowId": "a6e6f220-1c2c-4efe-9a67-c6e4102232ca",
+ "parentMessageId": null,
+ "message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
+ "role": "user",
+ "status": "first",
+ "sequenceNr": 1,
+ "publishedAt": 1759537468.2621148,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "round1_task0_action0_context",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-022428_1_0_0/message_text.txt b/test-chat/obj/m20251004-022428_1_0_0/message_text.txt
new file mode 100644
index 00000000..2486bd22
--- /dev/null
+++ b/test-chat/obj/m20251004-022428_1_0_0/message_text.txt
@@ -0,0 +1 @@
+Gib mir die ersten 1000 Primzahlen in einem word dokument aus
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-022433_1_1_0/message.json b/test-chat/obj/m20251004-022433_1_1_0/message.json
new file mode 100644
index 00000000..b3c6fef4
--- /dev/null
+++ b/test-chat/obj/m20251004-022433_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_07d1317e-aafe-469d-8156-3d98feac237e",
+ "workflowId": "a6e6f220-1c2c-4efe-9a67-c6e4102232ca",
+ "parentMessageId": null,
+ "message": "π **Task 1/2**\n\nπ¬ Berechne die ersten 1000 Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 3,
+ "publishedAt": 1759537473.2126782,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-022433_1_1_0/message_text.txt b/test-chat/obj/m20251004-022433_1_1_0/message_text.txt
new file mode 100644
index 00000000..c4301ba7
--- /dev/null
+++ b/test-chat/obj/m20251004-022433_1_1_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 1/2**
+
+π¬ Berechne die ersten 1000 Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-022437_1_1_0/message.json b/test-chat/obj/m20251004-022437_1_1_0/message.json
new file mode 100644
index 00000000..2348fb0c
--- /dev/null
+++ b/test-chat/obj/m20251004-022437_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_6793e9cd-6d1a-49d9-8912-d7cc62a4be86",
+ "workflowId": "a6e6f220-1c2c-4efe-9a67-c6e4102232ca",
+ "parentMessageId": null,
+ "message": "π― **Task 1/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 4,
+ "publishedAt": 1759537477.0633435,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-022437_1_1_0/message_text.txt b/test-chat/obj/m20251004-022437_1_1_0/message_text.txt
new file mode 100644
index 00000000..ef9599dd
--- /dev/null
+++ b/test-chat/obj/m20251004-022437_1_1_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 1/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251003-220843_1_2_0/message.json b/test-chat/obj/m20251004-022437_1_2_0/message.json
similarity index 61%
rename from test-chat/obj/m20251003-220843_1_2_0/message.json
rename to test-chat/obj/m20251004-022437_1_2_0/message.json
index 9d3ed129..7f12a2e8 100644
--- a/test-chat/obj/m20251003-220843_1_2_0/message.json
+++ b/test-chat/obj/m20251004-022437_1_2_0/message.json
@@ -1,12 +1,12 @@
{
- "id": "msg_d14a05bd-90ca-4d3b-a6b3-03244e8e4e18",
- "workflowId": "4886a461-687b-4980-853d-91251f9424ac",
+ "id": "msg_087ee318-a2ba-434d-b951-e225f5f8fbf0",
+ "workflowId": "a6e6f220-1c2c-4efe-9a67-c6e4102232ca",
"parentMessageId": null,
- "message": "π **Task 2/2**\n\nπ¬ Erstelle ein Word-Dokument und formatiere die Primzahlen ΓΌbersichtlich",
+ "message": "π **Task 2/2**\n\nπ¬ Erstelle ein Word-Dokument mit den berechneten Primzahlen",
"role": "assistant",
"status": "step",
- "sequenceNr": 6,
- "publishedAt": 1759522123.165301,
+ "sequenceNr": 5,
+ "publishedAt": 1759537477.2927067,
"roundNumber": 1,
"taskNumber": 2,
"actionNumber": 0,
diff --git a/test-chat/obj/m20251004-022437_1_2_0/message_text.txt b/test-chat/obj/m20251004-022437_1_2_0/message_text.txt
new file mode 100644
index 00000000..3dde58ed
--- /dev/null
+++ b/test-chat/obj/m20251004-022437_1_2_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 2/2**
+
+π¬ Erstelle ein Word-Dokument mit den berechneten Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-022440_1_0_0/message.json b/test-chat/obj/m20251004-022440_1_0_0/message.json
new file mode 100644
index 00000000..1bcfef76
--- /dev/null
+++ b/test-chat/obj/m20251004-022440_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_8274257e-7af8-45c8-8377-9b9d890bd30a",
+ "workflowId": "a6e6f220-1c2c-4efe-9a67-c6e4102232ca",
+ "parentMessageId": null,
+ "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 5 responses.\nWorkflow status: running",
+ "role": "assistant",
+ "status": "last",
+ "sequenceNr": 7,
+ "publishedAt": 1759537480.6174448,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "workflow_feedback",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-022440_1_0_0/message_text.txt b/test-chat/obj/m20251004-022440_1_0_0/message_text.txt
new file mode 100644
index 00000000..15e27608
--- /dev/null
+++ b/test-chat/obj/m20251004-022440_1_0_0/message_text.txt
@@ -0,0 +1,4 @@
+Workflow completed.
+
+Processed 1 user inputs and generated 5 responses.
+Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-022440_1_2_0/message.json b/test-chat/obj/m20251004-022440_1_2_0/message.json
new file mode 100644
index 00000000..b4035d7d
--- /dev/null
+++ b/test-chat/obj/m20251004-022440_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_c81c548e-eacd-46b6-81b7-c766ee2d7c54",
+ "workflowId": "a6e6f220-1c2c-4efe-9a67-c6e4102232ca",
+ "parentMessageId": null,
+ "message": "π― **Task 2/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 6,
+ "publishedAt": 1759537480.4937482,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-022440_1_2_0/message_text.txt b/test-chat/obj/m20251004-022440_1_2_0/message_text.txt
new file mode 100644
index 00000000..e46b0f2c
--- /dev/null
+++ b/test-chat/obj/m20251004-022440_1_2_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 2/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023003_1_0_0/message.json b/test-chat/obj/m20251004-023003_1_0_0/message.json
new file mode 100644
index 00000000..b86e2f48
--- /dev/null
+++ b/test-chat/obj/m20251004-023003_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_5b00016f-e6e1-4bf3-a9e2-0055dbddd6e9",
+ "workflowId": "ae1dc81f-f2c6-4776-b651-7b4a7fceaaa1",
+ "parentMessageId": null,
+ "message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
+ "role": "user",
+ "status": "first",
+ "sequenceNr": 1,
+ "publishedAt": 1759537803.3751707,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "round1_task0_action0_context",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023003_1_0_0/message_text.txt b/test-chat/obj/m20251004-023003_1_0_0/message_text.txt
new file mode 100644
index 00000000..2486bd22
--- /dev/null
+++ b/test-chat/obj/m20251004-023003_1_0_0/message_text.txt
@@ -0,0 +1 @@
+Gib mir die ersten 1000 Primzahlen in einem word dokument aus
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023008_1_1_0/message.json b/test-chat/obj/m20251004-023008_1_1_0/message.json
new file mode 100644
index 00000000..a082fd4b
--- /dev/null
+++ b/test-chat/obj/m20251004-023008_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_4f621c88-84ca-4c6d-8b25-bb55e6f47f22",
+ "workflowId": "ae1dc81f-f2c6-4776-b651-7b4a7fceaaa1",
+ "parentMessageId": null,
+ "message": "π **Task 1/2**\n\nπ¬ Berechne die ersten 1000 Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 3,
+ "publishedAt": 1759537808.6724803,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023008_1_1_0/message_text.txt b/test-chat/obj/m20251004-023008_1_1_0/message_text.txt
new file mode 100644
index 00000000..c4301ba7
--- /dev/null
+++ b/test-chat/obj/m20251004-023008_1_1_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 1/2**
+
+π¬ Berechne die ersten 1000 Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023011_1_1_0/message.json b/test-chat/obj/m20251004-023011_1_1_0/message.json
new file mode 100644
index 00000000..1820f1a0
--- /dev/null
+++ b/test-chat/obj/m20251004-023011_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_141cfe98-c96e-4711-b590-b39d27df68b0",
+ "workflowId": "ae1dc81f-f2c6-4776-b651-7b4a7fceaaa1",
+ "parentMessageId": null,
+ "message": "π― **Task 1/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 5,
+ "publishedAt": 1759537811.8603764,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023011_1_1_0/message_text.txt b/test-chat/obj/m20251004-023011_1_1_0/message_text.txt
new file mode 100644
index 00000000..ef9599dd
--- /dev/null
+++ b/test-chat/obj/m20251004-023011_1_1_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 1/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023011_1_1_1/message.json b/test-chat/obj/m20251004-023011_1_1_1/message.json
new file mode 100644
index 00000000..89d127c9
--- /dev/null
+++ b/test-chat/obj/m20251004-023011_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_7d974eed-481e-46bf-b32e-ca1ddb2af2e5",
+ "workflowId": "ae1dc81f-f2c6-4776-b651-7b4a7fceaaa1",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (ai.process)**\n\nβ Calculate and validate first 1000 prime numbers\n\nAI prompt is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 4,
+ "publishedAt": 1759537811.8009567,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task1_action1_results",
+ "actionId": "action_6c439238-e997-469d-827a-6db7b8948100",
+ "actionMethod": "ai",
+ "actionName": "process",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023011_1_1_1/message_text.txt b/test-chat/obj/m20251004-023011_1_1_1/message_text.txt
new file mode 100644
index 00000000..59b169a1
--- /dev/null
+++ b/test-chat/obj/m20251004-023011_1_1_1/message_text.txt
@@ -0,0 +1,6 @@
+**Action 1/1 (ai.process)**
+
+β Calculate and validate first 1000 prime numbers
+
+AI prompt is required
+
diff --git a/test-chat/obj/m20251004-023012_1_2_0/message.json b/test-chat/obj/m20251004-023012_1_2_0/message.json
new file mode 100644
index 00000000..9935dba8
--- /dev/null
+++ b/test-chat/obj/m20251004-023012_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_0538e8fe-b55e-47fe-828a-211e13a7b0c9",
+ "workflowId": "ae1dc81f-f2c6-4776-b651-7b4a7fceaaa1",
+ "parentMessageId": null,
+ "message": "π **Task 2/2**\n\nπ¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 6,
+ "publishedAt": 1759537812.1109097,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023012_1_2_0/message_text.txt b/test-chat/obj/m20251004-023012_1_2_0/message_text.txt
new file mode 100644
index 00000000..7efb03e5
--- /dev/null
+++ b/test-chat/obj/m20251004-023012_1_2_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 2/2**
+
+π¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023015_1_2_0/message.json b/test-chat/obj/m20251004-023015_1_2_0/message.json
new file mode 100644
index 00000000..825903f2
--- /dev/null
+++ b/test-chat/obj/m20251004-023015_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_a7140381-f959-481d-8689-1d6d38690945",
+ "workflowId": "ae1dc81f-f2c6-4776-b651-7b4a7fceaaa1",
+ "parentMessageId": null,
+ "message": "π― **Task 2/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 8,
+ "publishedAt": 1759537815.861256,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023015_1_2_0/message_text.txt b/test-chat/obj/m20251004-023015_1_2_0/message_text.txt
new file mode 100644
index 00000000..e46b0f2c
--- /dev/null
+++ b/test-chat/obj/m20251004-023015_1_2_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 2/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023015_1_2_1/message.json b/test-chat/obj/m20251004-023015_1_2_1/message.json
new file mode 100644
index 00000000..d4956f9d
--- /dev/null
+++ b/test-chat/obj/m20251004-023015_1_2_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_3a10e262-8a0d-403b-a778-a49d66947355",
+ "workflowId": "ae1dc81f-f2c6-4776-b651-7b4a7fceaaa1",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (document.generate)**\n\nβ Create and format Word document with prime numbers\n\nDocument list reference is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 7,
+ "publishedAt": 1759537815.7786386,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task2_action1_results",
+ "actionId": "action_7d5eba70-131a-4892-bac0-aad2f271547f",
+ "actionMethod": "document",
+ "actionName": "generate",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023015_1_2_1/message_text.txt b/test-chat/obj/m20251004-023015_1_2_1/message_text.txt
new file mode 100644
index 00000000..4cd0e2f3
--- /dev/null
+++ b/test-chat/obj/m20251004-023015_1_2_1/message_text.txt
@@ -0,0 +1,6 @@
+**Action 1/1 (document.generate)**
+
+β Create and format Word document with prime numbers
+
+Document list reference is required
+
diff --git a/test-chat/obj/m20251004-023016_1_0_0/message.json b/test-chat/obj/m20251004-023016_1_0_0/message.json
new file mode 100644
index 00000000..9f084c7f
--- /dev/null
+++ b/test-chat/obj/m20251004-023016_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_3582ce7c-58e0-4c46-b5f7-1cb01ba59d7c",
+ "workflowId": "ae1dc81f-f2c6-4776-b651-7b4a7fceaaa1",
+ "parentMessageId": null,
+ "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 7 responses.\nWorkflow status: running",
+ "role": "assistant",
+ "status": "last",
+ "sequenceNr": 9,
+ "publishedAt": 1759537816.013338,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "workflow_feedback",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023016_1_0_0/message_text.txt b/test-chat/obj/m20251004-023016_1_0_0/message_text.txt
new file mode 100644
index 00000000..828f4bea
--- /dev/null
+++ b/test-chat/obj/m20251004-023016_1_0_0/message_text.txt
@@ -0,0 +1,4 @@
+Workflow completed.
+
+Processed 1 user inputs and generated 7 responses.
+Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023238_1_0_0/message.json b/test-chat/obj/m20251004-023238_1_0_0/message.json
new file mode 100644
index 00000000..0077d613
--- /dev/null
+++ b/test-chat/obj/m20251004-023238_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_d53bbaf7-c9f3-4095-bc62-8819c73cfc7f",
+ "workflowId": "a7a831cc-a49d-415a-a066-81dd6fc8ac70",
+ "parentMessageId": null,
+ "message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
+ "role": "user",
+ "status": "first",
+ "sequenceNr": 1,
+ "publishedAt": 1759537958.4281359,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "round1_task0_action0_context",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023238_1_0_0/message_text.txt b/test-chat/obj/m20251004-023238_1_0_0/message_text.txt
new file mode 100644
index 00000000..2486bd22
--- /dev/null
+++ b/test-chat/obj/m20251004-023238_1_0_0/message_text.txt
@@ -0,0 +1 @@
+Gib mir die ersten 1000 Primzahlen in einem word dokument aus
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023243_1_1_0/message.json b/test-chat/obj/m20251004-023243_1_1_0/message.json
new file mode 100644
index 00000000..85019020
--- /dev/null
+++ b/test-chat/obj/m20251004-023243_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_294202bb-6c36-4b8d-bea9-cb66a95269b6",
+ "workflowId": "a7a831cc-a49d-415a-a066-81dd6fc8ac70",
+ "parentMessageId": null,
+ "message": "π **Task 1/2**\n\nπ¬ Berechne die ersten 1000 Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 3,
+ "publishedAt": 1759537963.7377448,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023243_1_1_0/message_text.txt b/test-chat/obj/m20251004-023243_1_1_0/message_text.txt
new file mode 100644
index 00000000..c4301ba7
--- /dev/null
+++ b/test-chat/obj/m20251004-023243_1_1_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 1/2**
+
+π¬ Berechne die ersten 1000 Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023246_1_1_1/message.json b/test-chat/obj/m20251004-023246_1_1_1/message.json
new file mode 100644
index 00000000..9be971b2
--- /dev/null
+++ b/test-chat/obj/m20251004-023246_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_200ec0ed-1bf4-4200-bcce-bfa76b6801db",
+ "workflowId": "a7a831cc-a49d-415a-a066-81dd6fc8ac70",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (ai.process)**\n\nβ Calculate and validate first 1000 prime numbers\n\nAI prompt is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 4,
+ "publishedAt": 1759537966.9700222,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task1_action1_results",
+ "actionId": "action_9a0aff99-efef-47ab-99a0-be98d48a390d",
+ "actionMethod": "ai",
+ "actionName": "process",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023246_1_1_1/message_text.txt b/test-chat/obj/m20251004-023246_1_1_1/message_text.txt
new file mode 100644
index 00000000..59b169a1
--- /dev/null
+++ b/test-chat/obj/m20251004-023246_1_1_1/message_text.txt
@@ -0,0 +1,6 @@
+**Action 1/1 (ai.process)**
+
+β Calculate and validate first 1000 prime numbers
+
+AI prompt is required
+
diff --git a/test-chat/obj/m20251004-023247_1_1_0/message.json b/test-chat/obj/m20251004-023247_1_1_0/message.json
new file mode 100644
index 00000000..1fbf677c
--- /dev/null
+++ b/test-chat/obj/m20251004-023247_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_2cff0edc-bf58-4337-9322-128fe6a9716a",
+ "workflowId": "a7a831cc-a49d-415a-a066-81dd6fc8ac70",
+ "parentMessageId": null,
+ "message": "π― **Task 1/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 5,
+ "publishedAt": 1759537967.0297816,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023247_1_1_0/message_text.txt b/test-chat/obj/m20251004-023247_1_1_0/message_text.txt
new file mode 100644
index 00000000..ef9599dd
--- /dev/null
+++ b/test-chat/obj/m20251004-023247_1_1_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 1/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023247_1_2_0/message.json b/test-chat/obj/m20251004-023247_1_2_0/message.json
new file mode 100644
index 00000000..5013477e
--- /dev/null
+++ b/test-chat/obj/m20251004-023247_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_74894310-373c-4510-9b19-bb02eae0cdf3",
+ "workflowId": "a7a831cc-a49d-415a-a066-81dd6fc8ac70",
+ "parentMessageId": null,
+ "message": "π **Task 2/2**\n\nπ¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 6,
+ "publishedAt": 1759537967.272046,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023247_1_2_0/message_text.txt b/test-chat/obj/m20251004-023247_1_2_0/message_text.txt
new file mode 100644
index 00000000..7efb03e5
--- /dev/null
+++ b/test-chat/obj/m20251004-023247_1_2_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 2/2**
+
+π¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023251_1_0_0/message.json b/test-chat/obj/m20251004-023251_1_0_0/message.json
new file mode 100644
index 00000000..cd5b802b
--- /dev/null
+++ b/test-chat/obj/m20251004-023251_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_0ddd5be4-43a6-4649-be55-94f9085cdeba",
+ "workflowId": "a7a831cc-a49d-415a-a066-81dd6fc8ac70",
+ "parentMessageId": null,
+ "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 7 responses.\nWorkflow status: running",
+ "role": "assistant",
+ "status": "last",
+ "sequenceNr": 9,
+ "publishedAt": 1759537971.4807367,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "workflow_feedback",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023251_1_0_0/message_text.txt b/test-chat/obj/m20251004-023251_1_0_0/message_text.txt
new file mode 100644
index 00000000..828f4bea
--- /dev/null
+++ b/test-chat/obj/m20251004-023251_1_0_0/message_text.txt
@@ -0,0 +1,4 @@
+Workflow completed.
+
+Processed 1 user inputs and generated 7 responses.
+Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023251_1_2_0/message.json b/test-chat/obj/m20251004-023251_1_2_0/message.json
new file mode 100644
index 00000000..f0035be4
--- /dev/null
+++ b/test-chat/obj/m20251004-023251_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_c17ce287-e28d-4388-a2df-025b2280b05d",
+ "workflowId": "a7a831cc-a49d-415a-a066-81dd6fc8ac70",
+ "parentMessageId": null,
+ "message": "π― **Task 2/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 8,
+ "publishedAt": 1759537971.3301194,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023251_1_2_0/message_text.txt b/test-chat/obj/m20251004-023251_1_2_0/message_text.txt
new file mode 100644
index 00000000..e46b0f2c
--- /dev/null
+++ b/test-chat/obj/m20251004-023251_1_2_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 2/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023251_1_2_1/message.json b/test-chat/obj/m20251004-023251_1_2_1/message.json
new file mode 100644
index 00000000..f30d7789
--- /dev/null
+++ b/test-chat/obj/m20251004-023251_1_2_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_b86d6ee2-2f17-441d-a8b1-39797a780062",
+ "workflowId": "a7a831cc-a49d-415a-a066-81dd6fc8ac70",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (document.generate)**\n\nβ Create and format Word document with prime numbers\n\nDocument list reference is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 7,
+ "publishedAt": 1759537971.2568488,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task2_action1_results",
+ "actionId": "action_218c22a4-2dec-40fe-9031-0e056ad8f927",
+ "actionMethod": "document",
+ "actionName": "generate",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023251_1_2_1/message_text.txt b/test-chat/obj/m20251004-023251_1_2_1/message_text.txt
new file mode 100644
index 00000000..4cd0e2f3
--- /dev/null
+++ b/test-chat/obj/m20251004-023251_1_2_1/message_text.txt
@@ -0,0 +1,6 @@
+**Action 1/1 (document.generate)**
+
+β Create and format Word document with prime numbers
+
+Document list reference is required
+
diff --git a/test-chat/obj/m20251004-023636_1_0_0/message.json b/test-chat/obj/m20251004-023636_1_0_0/message.json
new file mode 100644
index 00000000..38b39278
--- /dev/null
+++ b/test-chat/obj/m20251004-023636_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_27f6446a-9fc4-44ce-970f-07428b490675",
+ "workflowId": "f12ad0cc-387a-4ab0-8bc7-aa506becfbaa",
+ "parentMessageId": null,
+ "message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
+ "role": "user",
+ "status": "first",
+ "sequenceNr": 1,
+ "publishedAt": 1759538196.8761582,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "round1_task0_action0_context",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023636_1_0_0/message_text.txt b/test-chat/obj/m20251004-023636_1_0_0/message_text.txt
new file mode 100644
index 00000000..2486bd22
--- /dev/null
+++ b/test-chat/obj/m20251004-023636_1_0_0/message_text.txt
@@ -0,0 +1 @@
+Gib mir die ersten 1000 Primzahlen in einem word dokument aus
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023641_1_1_0/message.json b/test-chat/obj/m20251004-023641_1_1_0/message.json
new file mode 100644
index 00000000..9d1701da
--- /dev/null
+++ b/test-chat/obj/m20251004-023641_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_efd5a16d-3418-4991-a506-9eb8d2c8ebe9",
+ "workflowId": "f12ad0cc-387a-4ab0-8bc7-aa506becfbaa",
+ "parentMessageId": null,
+ "message": "π **Task Plan**\n\nIch werde die ersten 1000 Primzahlen berechnen und diese in einem formatierten Word-Dokument fΓΌr Sie ausgeben.\n\nπ¬ Berechne die ersten 1000 Primzahlen\n\nπ¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 2,
+ "publishedAt": 1759538201.899441,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_plan",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023641_1_1_0/message_text.txt b/test-chat/obj/m20251004-023641_1_1_0/message_text.txt
new file mode 100644
index 00000000..52d5cf37
--- /dev/null
+++ b/test-chat/obj/m20251004-023641_1_1_0/message_text.txt
@@ -0,0 +1,8 @@
+π **Task Plan**
+
+Ich werde die ersten 1000 Primzahlen berechnen und diese in einem formatierten Word-Dokument fΓΌr Sie ausgeben.
+
+π¬ Berechne die ersten 1000 Primzahlen
+
+π¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen
+
diff --git a/test-chat/obj/m20251004-023642_1_1_0/message.json b/test-chat/obj/m20251004-023642_1_1_0/message.json
new file mode 100644
index 00000000..859f59ad
--- /dev/null
+++ b/test-chat/obj/m20251004-023642_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_73116400-8085-4ea5-a75f-550e58ca8243",
+ "workflowId": "f12ad0cc-387a-4ab0-8bc7-aa506becfbaa",
+ "parentMessageId": null,
+ "message": "π **Task 1/2**\n\nπ¬ Berechne die ersten 1000 Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 3,
+ "publishedAt": 1759538202.0541372,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023642_1_1_0/message_text.txt b/test-chat/obj/m20251004-023642_1_1_0/message_text.txt
new file mode 100644
index 00000000..c4301ba7
--- /dev/null
+++ b/test-chat/obj/m20251004-023642_1_1_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 1/2**
+
+π¬ Berechne die ersten 1000 Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023645_1_1_0/message.json b/test-chat/obj/m20251004-023645_1_1_0/message.json
new file mode 100644
index 00000000..9702a81d
--- /dev/null
+++ b/test-chat/obj/m20251004-023645_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_b7e0cd23-e559-4033-94e3-644241e6a1b7",
+ "workflowId": "f12ad0cc-387a-4ab0-8bc7-aa506becfbaa",
+ "parentMessageId": null,
+ "message": "π― **Task 1/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 5,
+ "publishedAt": 1759538205.1009216,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023645_1_1_0/message_text.txt b/test-chat/obj/m20251004-023645_1_1_0/message_text.txt
new file mode 100644
index 00000000..ef9599dd
--- /dev/null
+++ b/test-chat/obj/m20251004-023645_1_1_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 1/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023645_1_1_1/message.json b/test-chat/obj/m20251004-023645_1_1_1/message.json
new file mode 100644
index 00000000..050c46e7
--- /dev/null
+++ b/test-chat/obj/m20251004-023645_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_95c8d6ca-4b90-4a08-99cf-212e3f795caa",
+ "workflowId": "f12ad0cc-387a-4ab0-8bc7-aa506becfbaa",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (ai.process)**\n\nβ Calculate and validate first 1000 prime numbers\n\nAI prompt is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 4,
+ "publishedAt": 1759538205.0437067,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task1_action1_results",
+ "actionId": "action_a4d71276-1fa8-4fca-b532-d36c5049a1ae",
+ "actionMethod": "ai",
+ "actionName": "process",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023645_1_1_1/message_text.txt b/test-chat/obj/m20251004-023645_1_1_1/message_text.txt
new file mode 100644
index 00000000..59b169a1
--- /dev/null
+++ b/test-chat/obj/m20251004-023645_1_1_1/message_text.txt
@@ -0,0 +1,6 @@
+**Action 1/1 (ai.process)**
+
+β Calculate and validate first 1000 prime numbers
+
+AI prompt is required
+
diff --git a/test-chat/obj/m20251004-023645_1_2_0/message.json b/test-chat/obj/m20251004-023645_1_2_0/message.json
new file mode 100644
index 00000000..fdcca6ca
--- /dev/null
+++ b/test-chat/obj/m20251004-023645_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_3bbf9b8c-bd44-47ad-85cb-54ff228c1c77",
+ "workflowId": "f12ad0cc-387a-4ab0-8bc7-aa506becfbaa",
+ "parentMessageId": null,
+ "message": "π **Task 2/2**\n\nπ¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 6,
+ "publishedAt": 1759538205.3493507,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023645_1_2_0/message_text.txt b/test-chat/obj/m20251004-023645_1_2_0/message_text.txt
new file mode 100644
index 00000000..7efb03e5
--- /dev/null
+++ b/test-chat/obj/m20251004-023645_1_2_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 2/2**
+
+π¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023648_1_0_0/message.json b/test-chat/obj/m20251004-023648_1_0_0/message.json
new file mode 100644
index 00000000..453f052b
--- /dev/null
+++ b/test-chat/obj/m20251004-023648_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_96801297-47cc-42b8-b153-59b72d729a5f",
+ "workflowId": "f12ad0cc-387a-4ab0-8bc7-aa506becfbaa",
+ "parentMessageId": null,
+ "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 7 responses.\nWorkflow status: running",
+ "role": "assistant",
+ "status": "last",
+ "sequenceNr": 9,
+ "publishedAt": 1759538208.626633,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "workflow_feedback",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023648_1_0_0/message_text.txt b/test-chat/obj/m20251004-023648_1_0_0/message_text.txt
new file mode 100644
index 00000000..828f4bea
--- /dev/null
+++ b/test-chat/obj/m20251004-023648_1_0_0/message_text.txt
@@ -0,0 +1,4 @@
+Workflow completed.
+
+Processed 1 user inputs and generated 7 responses.
+Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023648_1_2_0/message.json b/test-chat/obj/m20251004-023648_1_2_0/message.json
new file mode 100644
index 00000000..e50242b0
--- /dev/null
+++ b/test-chat/obj/m20251004-023648_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_4ecca97c-1aa8-48f0-a02a-193057d15238",
+ "workflowId": "f12ad0cc-387a-4ab0-8bc7-aa506becfbaa",
+ "parentMessageId": null,
+ "message": "π― **Task 2/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 8,
+ "publishedAt": 1759538208.4565256,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023648_1_2_0/message_text.txt b/test-chat/obj/m20251004-023648_1_2_0/message_text.txt
new file mode 100644
index 00000000..e46b0f2c
--- /dev/null
+++ b/test-chat/obj/m20251004-023648_1_2_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 2/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023648_1_2_1/message.json b/test-chat/obj/m20251004-023648_1_2_1/message.json
new file mode 100644
index 00000000..9a9961eb
--- /dev/null
+++ b/test-chat/obj/m20251004-023648_1_2_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_b6806350-eee6-4d9f-a540-579b86574ed9",
+ "workflowId": "f12ad0cc-387a-4ab0-8bc7-aa506becfbaa",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (document.generate)**\n\nβ Create and format Word document with prime numbers\n\nDocument list reference is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 7,
+ "publishedAt": 1759538208.3826365,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task2_action1_results",
+ "actionId": "action_bf2a4546-1f71-4ca0-bf46-cf6adc206c1a",
+ "actionMethod": "document",
+ "actionName": "generate",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023648_1_2_1/message_text.txt b/test-chat/obj/m20251004-023648_1_2_1/message_text.txt
new file mode 100644
index 00000000..4cd0e2f3
--- /dev/null
+++ b/test-chat/obj/m20251004-023648_1_2_1/message_text.txt
@@ -0,0 +1,6 @@
+**Action 1/1 (document.generate)**
+
+β Create and format Word document with prime numbers
+
+Document list reference is required
+
diff --git a/test-chat/obj/m20251004-023929_1_0_0/message.json b/test-chat/obj/m20251004-023929_1_0_0/message.json
new file mode 100644
index 00000000..bf6cd84b
--- /dev/null
+++ b/test-chat/obj/m20251004-023929_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_7ef08d97-ade4-4afb-8b77-7957afce0e06",
+ "workflowId": "7571f952-3c9d-4cf2-b74c-dcee0f0052ca",
+ "parentMessageId": null,
+ "message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
+ "role": "user",
+ "status": "first",
+ "sequenceNr": 1,
+ "publishedAt": 1759538369.6305652,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "round1_task0_action0_context",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023929_1_0_0/message_text.txt b/test-chat/obj/m20251004-023929_1_0_0/message_text.txt
new file mode 100644
index 00000000..2486bd22
--- /dev/null
+++ b/test-chat/obj/m20251004-023929_1_0_0/message_text.txt
@@ -0,0 +1 @@
+Gib mir die ersten 1000 Primzahlen in einem word dokument aus
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023935_1_1_0/message.json b/test-chat/obj/m20251004-023935_1_1_0/message.json
new file mode 100644
index 00000000..c7e12eb0
--- /dev/null
+++ b/test-chat/obj/m20251004-023935_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_e798a077-1844-4488-b692-e3b76f558d03",
+ "workflowId": "7571f952-3c9d-4cf2-b74c-dcee0f0052ca",
+ "parentMessageId": null,
+ "message": "π **Task 1/2**\n\nπ¬ Berechne die ersten 1000 Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 3,
+ "publishedAt": 1759538375.2581089,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023935_1_1_0/message_text.txt b/test-chat/obj/m20251004-023935_1_1_0/message_text.txt
new file mode 100644
index 00000000..c4301ba7
--- /dev/null
+++ b/test-chat/obj/m20251004-023935_1_1_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 1/2**
+
+π¬ Berechne die ersten 1000 Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023939_1_1_0/message.json b/test-chat/obj/m20251004-023939_1_1_0/message.json
new file mode 100644
index 00000000..66ad76a4
--- /dev/null
+++ b/test-chat/obj/m20251004-023939_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_a93a196a-ac1b-4617-a418-5c0fcd0fac04",
+ "workflowId": "7571f952-3c9d-4cf2-b74c-dcee0f0052ca",
+ "parentMessageId": null,
+ "message": "π― **Task 1/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 5,
+ "publishedAt": 1759538379.1466675,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023939_1_1_0/message_text.txt b/test-chat/obj/m20251004-023939_1_1_0/message_text.txt
new file mode 100644
index 00000000..ef9599dd
--- /dev/null
+++ b/test-chat/obj/m20251004-023939_1_1_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 1/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023939_1_1_1/message.json b/test-chat/obj/m20251004-023939_1_1_1/message.json
new file mode 100644
index 00000000..f38d6083
--- /dev/null
+++ b/test-chat/obj/m20251004-023939_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_412b13b0-b188-4562-9dcb-f57f45f7b685",
+ "workflowId": "7571f952-3c9d-4cf2-b74c-dcee0f0052ca",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (ai.process)**\n\nβ Calculate and validate first 1000 prime numbers\n\nAI prompt is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 4,
+ "publishedAt": 1759538379.086181,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task1_action1_results",
+ "actionId": "action_d0be5229-e26e-49be-8a8b-a7d901e2a58b",
+ "actionMethod": "ai",
+ "actionName": "process",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023939_1_1_1/message_text.txt b/test-chat/obj/m20251004-023939_1_1_1/message_text.txt
new file mode 100644
index 00000000..59b169a1
--- /dev/null
+++ b/test-chat/obj/m20251004-023939_1_1_1/message_text.txt
@@ -0,0 +1,6 @@
+**Action 1/1 (ai.process)**
+
+β Calculate and validate first 1000 prime numbers
+
+AI prompt is required
+
diff --git a/test-chat/obj/m20251004-023939_1_2_0/message.json b/test-chat/obj/m20251004-023939_1_2_0/message.json
new file mode 100644
index 00000000..22489ab1
--- /dev/null
+++ b/test-chat/obj/m20251004-023939_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_1509daa0-548f-44f2-8d91-98d01206f88d",
+ "workflowId": "7571f952-3c9d-4cf2-b74c-dcee0f0052ca",
+ "parentMessageId": null,
+ "message": "π **Task 2/2**\n\nπ¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 6,
+ "publishedAt": 1759538379.3843105,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023939_1_2_0/message_text.txt b/test-chat/obj/m20251004-023939_1_2_0/message_text.txt
new file mode 100644
index 00000000..7efb03e5
--- /dev/null
+++ b/test-chat/obj/m20251004-023939_1_2_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 2/2**
+
+π¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023942_1_0_0/message.json b/test-chat/obj/m20251004-023942_1_0_0/message.json
new file mode 100644
index 00000000..1a81d873
--- /dev/null
+++ b/test-chat/obj/m20251004-023942_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_4f6d07fe-6a54-450b-b427-9efd79d420d4",
+ "workflowId": "7571f952-3c9d-4cf2-b74c-dcee0f0052ca",
+ "parentMessageId": null,
+ "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 7 responses.\nWorkflow status: running",
+ "role": "assistant",
+ "status": "last",
+ "sequenceNr": 9,
+ "publishedAt": 1759538382.2573001,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "workflow_feedback",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023942_1_0_0/message_text.txt b/test-chat/obj/m20251004-023942_1_0_0/message_text.txt
new file mode 100644
index 00000000..828f4bea
--- /dev/null
+++ b/test-chat/obj/m20251004-023942_1_0_0/message_text.txt
@@ -0,0 +1,4 @@
+Workflow completed.
+
+Processed 1 user inputs and generated 7 responses.
+Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023942_1_2_0/message.json b/test-chat/obj/m20251004-023942_1_2_0/message.json
new file mode 100644
index 00000000..3a864f44
--- /dev/null
+++ b/test-chat/obj/m20251004-023942_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_742dd538-2a99-4766-bbc3-ab85e03430a9",
+ "workflowId": "7571f952-3c9d-4cf2-b74c-dcee0f0052ca",
+ "parentMessageId": null,
+ "message": "π― **Task 2/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 8,
+ "publishedAt": 1759538382.1228,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023942_1_2_0/message_text.txt b/test-chat/obj/m20251004-023942_1_2_0/message_text.txt
new file mode 100644
index 00000000..e46b0f2c
--- /dev/null
+++ b/test-chat/obj/m20251004-023942_1_2_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 2/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023942_1_2_1/message.json b/test-chat/obj/m20251004-023942_1_2_1/message.json
new file mode 100644
index 00000000..2fb12175
--- /dev/null
+++ b/test-chat/obj/m20251004-023942_1_2_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_70cc9b95-714d-4cbe-8ebb-eafd5c90e2ad",
+ "workflowId": "7571f952-3c9d-4cf2-b74c-dcee0f0052ca",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (document.generate)**\n\nβ Create and format Word document with prime numbers\n\nDocument list reference is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 7,
+ "publishedAt": 1759538382.0531425,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task2_action1_results",
+ "actionId": "action_d0fec1b3-a00b-407e-908e-6b94935c3a3a",
+ "actionMethod": "document",
+ "actionName": "generate",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-023942_1_2_1/message_text.txt b/test-chat/obj/m20251004-023942_1_2_1/message_text.txt
new file mode 100644
index 00000000..4cd0e2f3
--- /dev/null
+++ b/test-chat/obj/m20251004-023942_1_2_1/message_text.txt
@@ -0,0 +1,6 @@
+**Action 1/1 (document.generate)**
+
+β Create and format Word document with prime numbers
+
+Document list reference is required
+
diff --git a/test-chat/obj/m20251004-024459_1_0_0/message.json b/test-chat/obj/m20251004-024459_1_0_0/message.json
new file mode 100644
index 00000000..f519cead
--- /dev/null
+++ b/test-chat/obj/m20251004-024459_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_84b338e9-ef18-42e7-a321-700a3282a1d5",
+ "workflowId": "cccdd0dd-0beb-471d-afec-cc25f28f61d0",
+ "parentMessageId": null,
+ "message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
+ "role": "user",
+ "status": "first",
+ "sequenceNr": 1,
+ "publishedAt": 1759538699.126442,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "round1_task0_action0_context",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024459_1_0_0/message_text.txt b/test-chat/obj/m20251004-024459_1_0_0/message_text.txt
new file mode 100644
index 00000000..2486bd22
--- /dev/null
+++ b/test-chat/obj/m20251004-024459_1_0_0/message_text.txt
@@ -0,0 +1 @@
+Gib mir die ersten 1000 Primzahlen in einem word dokument aus
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024504_1_1_0/message.json b/test-chat/obj/m20251004-024504_1_1_0/message.json
new file mode 100644
index 00000000..17b5e55f
--- /dev/null
+++ b/test-chat/obj/m20251004-024504_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_5dacb440-ee92-45e3-9c3a-81a1f694f151",
+ "workflowId": "cccdd0dd-0beb-471d-afec-cc25f28f61d0",
+ "parentMessageId": null,
+ "message": "π **Task 1/2**\n\nπ¬ Berechne die ersten 1000 Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 3,
+ "publishedAt": 1759538704.5807652,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024504_1_1_0/message_text.txt b/test-chat/obj/m20251004-024504_1_1_0/message_text.txt
new file mode 100644
index 00000000..c4301ba7
--- /dev/null
+++ b/test-chat/obj/m20251004-024504_1_1_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 1/2**
+
+π¬ Berechne die ersten 1000 Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024540_1_1_0/message.json b/test-chat/obj/m20251004-024540_1_1_0/message.json
new file mode 100644
index 00000000..90354bc5
--- /dev/null
+++ b/test-chat/obj/m20251004-024540_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_c21f9fc1-715f-4d83-8390-8a56e7617f22",
+ "workflowId": "cccdd0dd-0beb-471d-afec-cc25f28f61d0",
+ "parentMessageId": null,
+ "message": "π― **Task 1/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 5,
+ "publishedAt": 1759538740.4817355,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024540_1_1_0/message_text.txt b/test-chat/obj/m20251004-024540_1_1_0/message_text.txt
new file mode 100644
index 00000000..ef9599dd
--- /dev/null
+++ b/test-chat/obj/m20251004-024540_1_1_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 1/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024540_1_1_1/message.json b/test-chat/obj/m20251004-024540_1_1_1/message.json
new file mode 100644
index 00000000..9c9608e7
--- /dev/null
+++ b/test-chat/obj/m20251004-024540_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_7d047e4e-81b5-4242-8c4f-f57a169a0b65",
+ "workflowId": "cccdd0dd-0beb-471d-afec-cc25f28f61d0",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (ai.process)**\n\nβ
Calculate and validate first 1000 prime numbers\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 4,
+ "publishedAt": 1759538740.4192836,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task1_action1_results",
+ "actionId": "action_f7b26203-31dd-4015-bf1a-a2bdb49abb46",
+ "actionMethod": "ai",
+ "actionName": "process",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024540_1_1_1/message_text.txt b/test-chat/obj/m20251004-024540_1_1_1/message_text.txt
new file mode 100644
index 00000000..5901cd49
--- /dev/null
+++ b/test-chat/obj/m20251004-024540_1_1_1/message_text.txt
@@ -0,0 +1,4 @@
+**Action 1/1 (ai.process)**
+
+β
Calculate and validate first 1000 prime numbers
+
diff --git a/test-chat/obj/m20251004-024540_1_1_1/round1_task1_action1_results/document_001_metadata.json b/test-chat/obj/m20251004-024540_1_1_1/round1_task1_action1_results/document_001_metadata.json
new file mode 100644
index 00000000..43db3d96
--- /dev/null
+++ b/test-chat/obj/m20251004-024540_1_1_1/round1_task1_action1_results/document_001_metadata.json
@@ -0,0 +1,12 @@
+{
+ "id": "4dba6590-c235-41d1-bc0d-4611cd162fc7",
+ "messageId": "msg_7d047e4e-81b5-4242-8c4f-f57a169a0b65",
+ "fileId": "de7627b8-aa97-43b2-b05c-13f126e876fb",
+ "fileName": "ai_result_r0t0a0_97.txt",
+ "fileSize": 3193,
+ "mimeType": "text/plain",
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "actionId": "action_f7b26203-31dd-4015-bf1a-a2bdb49abb46"
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024540_1_2_0/message.json b/test-chat/obj/m20251004-024540_1_2_0/message.json
new file mode 100644
index 00000000..10a6830a
--- /dev/null
+++ b/test-chat/obj/m20251004-024540_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_04725083-cf4a-423d-8e66-d5e7620c6ea7",
+ "workflowId": "cccdd0dd-0beb-471d-afec-cc25f28f61d0",
+ "parentMessageId": null,
+ "message": "π **Task 2/2**\n\nπ¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 6,
+ "publishedAt": 1759538740.729413,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024540_1_2_0/message_text.txt b/test-chat/obj/m20251004-024540_1_2_0/message_text.txt
new file mode 100644
index 00000000..7efb03e5
--- /dev/null
+++ b/test-chat/obj/m20251004-024540_1_2_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 2/2**
+
+π¬ Erstelle ein formatiertes Word-Dokument mit den Primzahlen
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024543_1_2_1/message.json b/test-chat/obj/m20251004-024543_1_2_1/message.json
new file mode 100644
index 00000000..7c63059e
--- /dev/null
+++ b/test-chat/obj/m20251004-024543_1_2_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_68a3274d-9e8a-476d-b964-238aeefeaf24",
+ "workflowId": "cccdd0dd-0beb-471d-afec-cc25f28f61d0",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (document.generate)**\n\nβ Create and format Word document with prime numbers\n\nDocument list reference is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 7,
+ "publishedAt": 1759538743.9234207,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task2_action1_results",
+ "actionId": "action_6bd52502-ad2d-4bef-a561-359783148c98",
+ "actionMethod": "document",
+ "actionName": "generate",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024543_1_2_1/message_text.txt b/test-chat/obj/m20251004-024543_1_2_1/message_text.txt
new file mode 100644
index 00000000..4cd0e2f3
--- /dev/null
+++ b/test-chat/obj/m20251004-024543_1_2_1/message_text.txt
@@ -0,0 +1,6 @@
+**Action 1/1 (document.generate)**
+
+β Create and format Word document with prime numbers
+
+Document list reference is required
+
diff --git a/test-chat/obj/m20251004-024544_1_0_0/message.json b/test-chat/obj/m20251004-024544_1_0_0/message.json
new file mode 100644
index 00000000..3456d4b4
--- /dev/null
+++ b/test-chat/obj/m20251004-024544_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_2cf6c0e5-7b17-4b68-9e38-53680fd756fc",
+ "workflowId": "cccdd0dd-0beb-471d-afec-cc25f28f61d0",
+ "parentMessageId": null,
+ "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 7 responses.\nWorkflow status: running",
+ "role": "assistant",
+ "status": "last",
+ "sequenceNr": 9,
+ "publishedAt": 1759538744.1620197,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "workflow_feedback",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024544_1_0_0/message_text.txt b/test-chat/obj/m20251004-024544_1_0_0/message_text.txt
new file mode 100644
index 00000000..828f4bea
--- /dev/null
+++ b/test-chat/obj/m20251004-024544_1_0_0/message_text.txt
@@ -0,0 +1,4 @@
+Workflow completed.
+
+Processed 1 user inputs and generated 7 responses.
+Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024544_1_2_0/message.json b/test-chat/obj/m20251004-024544_1_2_0/message.json
new file mode 100644
index 00000000..e21904e1
--- /dev/null
+++ b/test-chat/obj/m20251004-024544_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_e281257e-3b6d-4555-b160-19aa76d2a96b",
+ "workflowId": "cccdd0dd-0beb-471d-afec-cc25f28f61d0",
+ "parentMessageId": null,
+ "message": "π― **Task 2/2**\n\nβ
Completed\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 8,
+ "publishedAt": 1759538744.000259,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024544_1_2_0/message_text.txt b/test-chat/obj/m20251004-024544_1_2_0/message_text.txt
new file mode 100644
index 00000000..e46b0f2c
--- /dev/null
+++ b/test-chat/obj/m20251004-024544_1_2_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 2/2**
+
+β
Completed
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024704_1_0_0/message.json b/test-chat/obj/m20251004-024704_1_0_0/message.json
new file mode 100644
index 00000000..6e5f6e0f
--- /dev/null
+++ b/test-chat/obj/m20251004-024704_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_02bb93f8-a8b4-48f3-acc6-6dd6ec656dd6",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "Gib mir die ersten 1000 Primzahlen in einem word dokument aus",
+ "role": "user",
+ "status": "first",
+ "sequenceNr": 1,
+ "publishedAt": 1759538824.813388,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "round1_task0_action0_context",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024704_1_0_0/message_text.txt b/test-chat/obj/m20251004-024704_1_0_0/message_text.txt
new file mode 100644
index 00000000..2486bd22
--- /dev/null
+++ b/test-chat/obj/m20251004-024704_1_0_0/message_text.txt
@@ -0,0 +1 @@
+Gib mir die ersten 1000 Primzahlen in einem word dokument aus
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024709_1_1_0/message.json b/test-chat/obj/m20251004-024709_1_1_0/message.json
new file mode 100644
index 00000000..c97606d8
--- /dev/null
+++ b/test-chat/obj/m20251004-024709_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_032cb2ca-3266-4db2-a358-4fcd100b24d7",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "π **Task 1/2**\n\nπ¬ Generating the list of the first 1000 prime numbers",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 3,
+ "publishedAt": 1759538829.86202,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024709_1_1_0/message_text.txt b/test-chat/obj/m20251004-024709_1_1_0/message_text.txt
new file mode 100644
index 00000000..1e94ddff
--- /dev/null
+++ b/test-chat/obj/m20251004-024709_1_1_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 1/2**
+
+π¬ Generating the list of the first 1000 prime numbers
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024746_1_1_1/message.json b/test-chat/obj/m20251004-024746_1_1_1/message.json
new file mode 100644
index 00000000..86182784
--- /dev/null
+++ b/test-chat/obj/m20251004-024746_1_1_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_6707d940-f5da-4e1d-9e15-10ca7ef663db",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (ai.process)**\n\nβ
Calculate and validate the first 1000 prime numbers\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 4,
+ "publishedAt": 1759538866.5904648,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task1_action1_results",
+ "actionId": "action_e19a503f-993b-4644-a2be-966d302ccc67",
+ "actionMethod": "ai",
+ "actionName": "process",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024746_1_1_1/message_text.txt b/test-chat/obj/m20251004-024746_1_1_1/message_text.txt
new file mode 100644
index 00000000..ee400c7a
--- /dev/null
+++ b/test-chat/obj/m20251004-024746_1_1_1/message_text.txt
@@ -0,0 +1,4 @@
+**Action 1/1 (ai.process)**
+
+β
Calculate and validate the first 1000 prime numbers
+
diff --git a/test-chat/obj/m20251004-024746_1_1_1/round1_task1_action1_results/document_001_metadata.json b/test-chat/obj/m20251004-024746_1_1_1/round1_task1_action1_results/document_001_metadata.json
new file mode 100644
index 00000000..65bfe408
--- /dev/null
+++ b/test-chat/obj/m20251004-024746_1_1_1/round1_task1_action1_results/document_001_metadata.json
@@ -0,0 +1,12 @@
+{
+ "id": "c2537333-8433-49da-b7f1-94b733375188",
+ "messageId": "msg_6707d940-f5da-4e1d-9e15-10ca7ef663db",
+ "fileId": "111c6b08-07c1-40fa-b594-8b52843cfce0",
+ "fileName": "ai_result_r0t0a0_98.txt",
+ "fileSize": 3185,
+ "mimeType": "text/plain",
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 1,
+ "actionId": "action_e19a503f-993b-4644-a2be-966d302ccc67"
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024749_1_1_0/message.json b/test-chat/obj/m20251004-024749_1_1_0/message.json
new file mode 100644
index 00000000..bdf3a264
--- /dev/null
+++ b/test-chat/obj/m20251004-024749_1_1_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_81108127-3512-40da-86fe-4f171fea49d8",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "π― **Task 1/2**\n\nβ
default\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 5,
+ "publishedAt": 1759538869.9300551,
+ "roundNumber": 1,
+ "taskNumber": 1,
+ "actionNumber": 0,
+ "documentsLabel": "task_1_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024749_1_1_0/message_text.txt b/test-chat/obj/m20251004-024749_1_1_0/message_text.txt
new file mode 100644
index 00000000..eb934762
--- /dev/null
+++ b/test-chat/obj/m20251004-024749_1_1_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 1/2**
+
+β
default
+π Score 8/10
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024750_1_2_0/message.json b/test-chat/obj/m20251004-024750_1_2_0/message.json
new file mode 100644
index 00000000..8fba7084
--- /dev/null
+++ b/test-chat/obj/m20251004-024750_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_455972f5-fa7d-4039-944f-2ebcfc4fdc3e",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "π **Task 2/2**\n\nπ¬ Creating a well-formatted Word document containing the prime numbers",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 6,
+ "publishedAt": 1759538870.1842601,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_start",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024750_1_2_0/message_text.txt b/test-chat/obj/m20251004-024750_1_2_0/message_text.txt
new file mode 100644
index 00000000..a886f847
--- /dev/null
+++ b/test-chat/obj/m20251004-024750_1_2_0/message_text.txt
@@ -0,0 +1,3 @@
+π **Task 2/2**
+
+π¬ Creating a well-formatted Word document containing the prime numbers
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024754_1_2_1/message.json b/test-chat/obj/m20251004-024754_1_2_1/message.json
new file mode 100644
index 00000000..0e152053
--- /dev/null
+++ b/test-chat/obj/m20251004-024754_1_2_1/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_4a6a7ed3-793b-46bd-a862-d1bdb0421e77",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "**Action 1/1 (document.generate)**\n\nβ Create and format Word document with prime numbers\n\nDocument list reference is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 7,
+ "publishedAt": 1759538874.460911,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 1,
+ "documentsLabel": "round1_task2_action1_results",
+ "actionId": "action_2658fb1d-84d6-4114-b2b5-ffbc047a5cc3",
+ "actionMethod": "document",
+ "actionName": "generate",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024754_1_2_1/message_text.txt b/test-chat/obj/m20251004-024754_1_2_1/message_text.txt
new file mode 100644
index 00000000..4cd0e2f3
--- /dev/null
+++ b/test-chat/obj/m20251004-024754_1_2_1/message_text.txt
@@ -0,0 +1,6 @@
+**Action 1/1 (document.generate)**
+
+β Create and format Word document with prime numbers
+
+Document list reference is required
+
diff --git a/test-chat/obj/m20251004-024759_1_2_2/message.json b/test-chat/obj/m20251004-024759_1_2_2/message.json
new file mode 100644
index 00000000..7fe541c8
--- /dev/null
+++ b/test-chat/obj/m20251004-024759_1_2_2/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_3dcecd7a-e361-4de7-9665-668bfbbd9cca",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "**Action 2/1 (document.generate)**\n\nβ Create and format Word document with prime numbers\n\nDocument list reference is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 8,
+ "publishedAt": 1759538879.5315428,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 2,
+ "documentsLabel": "round1_task2_action2_results",
+ "actionId": "action_fe93defe-de3d-419a-bf78-e9dfa52e880d",
+ "actionMethod": "document",
+ "actionName": "generate",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024759_1_2_2/message_text.txt b/test-chat/obj/m20251004-024759_1_2_2/message_text.txt
new file mode 100644
index 00000000..08d81c76
--- /dev/null
+++ b/test-chat/obj/m20251004-024759_1_2_2/message_text.txt
@@ -0,0 +1,6 @@
+**Action 2/1 (document.generate)**
+
+β Create and format Word document with prime numbers
+
+Document list reference is required
+
diff --git a/test-chat/obj/m20251004-024803_1_2_3/message.json b/test-chat/obj/m20251004-024803_1_2_3/message.json
new file mode 100644
index 00000000..7d6e79bf
--- /dev/null
+++ b/test-chat/obj/m20251004-024803_1_2_3/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_81ef7166-0c00-4071-b30e-0955e72e3b4c",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "**Action 3/1 (document.generate)**\n\nβ Create and format Word document with prime numbers\n\nDocument list reference is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 9,
+ "publishedAt": 1759538883.608874,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 3,
+ "documentsLabel": "round1_task2_action3_results",
+ "actionId": "action_058fcf5c-875c-4fef-8c2e-fbbbc0975ec5",
+ "actionMethod": "document",
+ "actionName": "generate",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024803_1_2_3/message_text.txt b/test-chat/obj/m20251004-024803_1_2_3/message_text.txt
new file mode 100644
index 00000000..d5ccc1b0
--- /dev/null
+++ b/test-chat/obj/m20251004-024803_1_2_3/message_text.txt
@@ -0,0 +1,6 @@
+**Action 3/1 (document.generate)**
+
+β Create and format Word document with prime numbers
+
+Document list reference is required
+
diff --git a/test-chat/obj/m20251004-024807_1_2_4/message.json b/test-chat/obj/m20251004-024807_1_2_4/message.json
new file mode 100644
index 00000000..a71c250d
--- /dev/null
+++ b/test-chat/obj/m20251004-024807_1_2_4/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_ec34027a-3eed-43b7-9023-3ccc4516d974",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "**Action 4/1 (document.generate)**\n\nβ Create and format Word document with prime numbers\n\nDocument list reference is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 10,
+ "publishedAt": 1759538887.7857635,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 4,
+ "documentsLabel": "round1_task2_action4_results",
+ "actionId": "action_2dcc9a30-dedf-4414-ba52-d96dcdf364a9",
+ "actionMethod": "document",
+ "actionName": "generate",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024807_1_2_4/message_text.txt b/test-chat/obj/m20251004-024807_1_2_4/message_text.txt
new file mode 100644
index 00000000..28a8f754
--- /dev/null
+++ b/test-chat/obj/m20251004-024807_1_2_4/message_text.txt
@@ -0,0 +1,6 @@
+**Action 4/1 (document.generate)**
+
+β Create and format Word document with prime numbers
+
+Document list reference is required
+
diff --git a/test-chat/obj/m20251004-024812_1_2_5/message.json b/test-chat/obj/m20251004-024812_1_2_5/message.json
new file mode 100644
index 00000000..a87c22cd
--- /dev/null
+++ b/test-chat/obj/m20251004-024812_1_2_5/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_06db0a4f-51a0-46df-ae5b-6ff0e148ac68",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "**Action 5/1 (document.generate)**\n\nβ Create and format Word document with prime numbers\n\nDocument list reference is required\n\n",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 11,
+ "publishedAt": 1759538892.2379243,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 5,
+ "documentsLabel": "round1_task2_action5_results",
+ "actionId": "action_d2fc2245-efb3-4de9-84cf-0d61c0d70520",
+ "actionMethod": "document",
+ "actionName": "generate",
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024812_1_2_5/message_text.txt b/test-chat/obj/m20251004-024812_1_2_5/message_text.txt
new file mode 100644
index 00000000..b200da45
--- /dev/null
+++ b/test-chat/obj/m20251004-024812_1_2_5/message_text.txt
@@ -0,0 +1,6 @@
+**Action 5/1 (document.generate)**
+
+β Create and format Word document with prime numbers
+
+Document list reference is required
+
diff --git a/test-chat/obj/m20251004-024813_1_0_0/message.json b/test-chat/obj/m20251004-024813_1_0_0/message.json
new file mode 100644
index 00000000..1c35a636
--- /dev/null
+++ b/test-chat/obj/m20251004-024813_1_0_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_7e1eabbe-4daf-4957-9b15-3b829ee3cfe9",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "Workflow completed.\n\nProcessed 1 user inputs and generated 11 responses.\nWorkflow status: running",
+ "role": "assistant",
+ "status": "last",
+ "sequenceNr": 13,
+ "publishedAt": 1759538893.5467033,
+ "roundNumber": 1,
+ "taskNumber": 0,
+ "actionNumber": 0,
+ "documentsLabel": "workflow_feedback",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024813_1_0_0/message_text.txt b/test-chat/obj/m20251004-024813_1_0_0/message_text.txt
new file mode 100644
index 00000000..7000823d
--- /dev/null
+++ b/test-chat/obj/m20251004-024813_1_0_0/message_text.txt
@@ -0,0 +1,4 @@
+Workflow completed.
+
+Processed 1 user inputs and generated 11 responses.
+Workflow status: running
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024813_1_2_0/message.json b/test-chat/obj/m20251004-024813_1_2_0/message.json
new file mode 100644
index 00000000..9269c4af
--- /dev/null
+++ b/test-chat/obj/m20251004-024813_1_2_0/message.json
@@ -0,0 +1,19 @@
+{
+ "id": "msg_58a3f47c-56cb-4c30-8a28-ca4c4334fc20",
+ "workflowId": "22937d42-31a0-4167-ab3d-623e43191e1e",
+ "parentMessageId": null,
+ "message": "π― **Task 2/2**\n\nβ
The objective to create and format a Word document with prime numbers has not been fulfilled as no documents were created.\nπ Score 8/10",
+ "role": "assistant",
+ "status": "step",
+ "sequenceNr": 12,
+ "publishedAt": 1759538893.3143883,
+ "roundNumber": 1,
+ "taskNumber": 2,
+ "actionNumber": 0,
+ "documentsLabel": "task_2_completion",
+ "actionId": null,
+ "actionMethod": null,
+ "actionName": null,
+ "success": null,
+ "documents": []
+}
\ No newline at end of file
diff --git a/test-chat/obj/m20251004-024813_1_2_0/message_text.txt b/test-chat/obj/m20251004-024813_1_2_0/message_text.txt
new file mode 100644
index 00000000..9faa4370
--- /dev/null
+++ b/test-chat/obj/m20251004-024813_1_2_0/message_text.txt
@@ -0,0 +1,4 @@
+π― **Task 2/2**
+
+β
The objective to create and format a Word document with prime numbers has not been fulfilled as no documents were created.
+π Score 8/10
\ No newline at end of file