From 4f7bba5f333a3c4723dfc82aa72c2b07637e3bd0 Mon Sep 17 00:00:00 2001
From: ValueOn AG
Date: Fri, 24 Oct 2025 21:42:37 +0200
Subject: [PATCH] Module test for dynamic centralized AI calling system, rev 3 (tested)
---
modules/services/serviceAi/mainServiceAi.py | 6 +-
modules/services/serviceAi/subCoreAi.py | 108 ++++++++++++++++--
.../mainServiceNormalization.py | 2 +-
.../serviceWorkflow/mainServiceWorkflow.py | 18 +--
modules/workflows/methods/methodAi.py | 39 +------
.../processing/adaptive/contentValidator.py | 7 +-
.../processing/adaptive/intentAnalyzer.py | 7 +-
.../workflows/processing/core/taskPlanner.py | 4 +-
.../processing/modes/modeActionplan.py | 4 +-
.../workflows/processing/modes/modeReact.py | 66 ++---------
modules/workflows/workflowManager.py | 4 +-
test_operation_type_ratings.py | 18 ++-
12 files changed, 146 insertions(+), 137 deletions(-)
diff --git a/modules/services/serviceAi/mainServiceAi.py b/modules/services/serviceAi/mainServiceAi.py
index 2f1c7443..e4782588 100644
--- a/modules/services/serviceAi/mainServiceAi.py
+++ b/modules/services/serviceAi/mainServiceAi.py
@@ -139,14 +139,12 @@ class AiService:
async def callAiPlanning(
self,
prompt: str,
- placeholders: Optional[List[PromptPlaceholder]] = None,
- options: Optional[AiCallOptions] = None,
- loopInstructionFormat: Optional[str] = None
+ placeholders: Optional[List[PromptPlaceholder]] = None
) -> str:
"""Planning AI call for task planning, action planning, action selection, etc."""
await self._ensureAiObjectsInitialized()
# Always use "json" for planning calls since they return JSON
- return await self.coreAi.callAiPlanning(prompt, placeholders, options, "json")
+ return await self.coreAi.callAiPlanning(prompt, placeholders, "json")
async def callAiDocuments(
self,
diff --git a/modules/services/serviceAi/subCoreAi.py b/modules/services/serviceAi/subCoreAi.py
index 6ab4a260..80992b4b 100644
--- a/modules/services/serviceAi/subCoreAi.py
+++ b/modules/services/serviceAi/subCoreAi.py
@@ -2,7 +2,7 @@ import json
import logging
from typing import Dict, Any, List, Optional, Tuple, Union
from modules.datamodels.datamodelChat import PromptPlaceholder, ChatDocument
-from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum
+from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
from modules.datamodels.datamodelExtraction import ContentPart
from modules.services.serviceAi.subSharedAiUtils import (
buildPromptWithPlaceholders,
@@ -57,6 +57,85 @@ class SubCoreAi:
self.services = services
self.aiObjects = aiObjects
+ async def _analyzePromptAndCreateOptions(self, prompt: str) -> AiCallOptions:
+ """Analyze prompt to determine appropriate AiCallOptions parameters."""
+ try:
+ # Get dynamic enum values from Pydantic models
+ operation_types = [e.value for e in OperationTypeEnum]
+ priorities = [e.value for e in PriorityEnum]
+ processing_modes = [e.value for e in ProcessingModeEnum]
+
+ # Create analysis prompt for AI to determine operation type and parameters
+ analysisPrompt = f"""
+You are an AI operation analyzer. Analyze the following prompt and determine the most appropriate operation type and parameters.
+
+PROMPT TO ANALYZE:
+{self.services.ai.sanitizePromptContent(prompt, 'userinput')}
+
+Based on the prompt content, determine:
+1. operationType: Choose the most appropriate from: {', '.join(operation_types)}
+2. priority: Choose from: {', '.join(priorities)}
+3. processingMode: Choose from: {', '.join(processing_modes)}
+4. compressPrompt: true/false (true for story-like prompts, false for structured prompts with JSON/schemas)
+5. compressContext: true/false (true to summarize context, false to process fully)
+
+Respond with ONLY a JSON object in this exact format:
+{{
+ "operationType": "dataAnalyse",
+ "priority": "balanced",
+ "processingMode": "basic",
+ "compressPrompt": true,
+ "compressContext": true
+}}
+"""
+
+ # Use AI to analyze the prompt
+ request = AiCallRequest(
+ prompt=analysisPrompt,
+ options=AiCallOptions(
+ operationType=OperationTypeEnum.DATA_ANALYSE,
+ priority=PriorityEnum.SPEED,
+ processingMode=ProcessingModeEnum.BASIC,
+ compressPrompt=True,
+ compressContext=False
+ )
+ )
+
+ response = await self.aiObjects.call(request)
+
+ # Parse AI response
+ try:
+ import json
+ json_start = response.content.find('{')
+ json_end = response.content.rfind('}') + 1
+ if json_start != -1 and json_end > json_start:
+ analysis = json.loads(response.content[json_start:json_end])
+
+ # Map string values to enums
+ operation_type = OperationTypeEnum(analysis.get('operationType', 'dataAnalyse'))
+ priority = PriorityEnum(analysis.get('priority', 'balanced'))
+ processing_mode = ProcessingModeEnum(analysis.get('processingMode', 'basic'))
+
+ return AiCallOptions(
+ operationType=operation_type,
+ priority=priority,
+ processingMode=processing_mode,
+ compressPrompt=analysis.get('compressPrompt', True),
+ compressContext=analysis.get('compressContext', True)
+ )
+ except Exception as e:
+ logger.warning(f"Failed to parse AI analysis response: {e}")
+
+ except Exception as e:
+ logger.warning(f"Prompt analysis failed: {e}")
+
+ # Fallback to default options
+ return AiCallOptions(
+ operationType=OperationTypeEnum.DATA_ANALYSE,
+ priority=PriorityEnum.BALANCED,
+ processingMode=ProcessingModeEnum.BASIC
+ )
+
# Shared Core Function for AI Calls with Looping
@@ -342,22 +421,29 @@ CRITICAL REQUIREMENTS:
self,
prompt: str,
placeholders: Optional[List[PromptPlaceholder]] = None,
- options: Optional[AiCallOptions] = None,
loopInstructionFormat: Optional[str] = None
) -> str:
"""
Planning AI call for task planning, action planning, action selection, etc.
+ Always uses static parameters optimized for planning tasks.
Args:
prompt: The planning prompt
placeholders: Optional list of placeholder replacements
- options: AI call configuration options
+ loopInstructionFormat: Optional loop instruction format
Returns:
Planning JSON response
"""
- if options is None:
- options = AiCallOptions()
+ # Planning calls always use static parameters
+ logger.debug("Using static parameters for planning call")
+ options = AiCallOptions(
+ operationType=OperationTypeEnum.PLAN,
+ priority=PriorityEnum.QUALITY,
+ processingMode=ProcessingModeEnum.DETAILED,
+ compressPrompt=False,
+ compressContext=False
+ )
# Build full prompt with placeholders
if placeholders:
@@ -393,17 +479,21 @@ CRITICAL REQUIREMENTS:
Returns:
AI response as string, or dict with documents if outputFormat is specified
"""
- if options is None:
- options = AiCallOptions()
+ if options is None or (hasattr(options, 'operationType') and options.operationType is None):
+ # Use AI to determine parameters ONLY when truly needed (options=None OR operationType=None)
+ logger.debug("Analyzing prompt to determine optimal parameters")
+ options = await self._analyzePromptAndCreateOptions(prompt)
+ else:
+ logger.debug(f"Using provided options: operationType={options.operationType}, priority={options.priority}")
# Handle document generation with specific output format using unified approach
if outputFormat:
# Use unified generation method for all document generation
if documents and len(documents) > 0:
- logger.info(f"Extracting content from {len(documents)} documents")
+ logger.debug(f"Extracting content from {len(documents)} documents")
extracted_content = await self.services.ai.documentProcessor.callAiText(prompt, documents, options)
else:
- logger.info("No documents provided - using direct generation")
+ logger.debug("No documents provided - using direct generation")
extracted_content = None
generation_prompt = await self._buildGenerationPrompt(prompt, extracted_content, outputFormat, title)
generated_json = await self._callAiWithLooping(generation_prompt, options, "document_generation", loopInstructionFormat=loopInstructionFormat)
diff --git a/modules/services/serviceNormalization/mainServiceNormalization.py b/modules/services/serviceNormalization/mainServiceNormalization.py
index 6748be72..d8696fa5 100644
--- a/modules/services/serviceNormalization/mainServiceNormalization.py
+++ b/modules/services/serviceNormalization/mainServiceNormalization.py
@@ -90,7 +90,7 @@ class NormalizationService:
" \"Date\": {\"formats\": [\"DD.MM.YYYY\",\"YYYY-MM-DD\"]}\n }\n}\n"
)
- response = await self.services.ai.callAiPlanning(prompt=prompt, placeholders=None, options=None)
+ response = await self.services.ai.callAiPlanning(prompt=prompt, placeholders=None)
if not response:
return {"mapping": {}, "normalizationPolicy": {}}
diff --git a/modules/services/serviceWorkflow/mainServiceWorkflow.py b/modules/services/serviceWorkflow/mainServiceWorkflow.py
index a29df0c5..8c6acd84 100644
--- a/modules/services/serviceWorkflow/mainServiceWorkflow.py
+++ b/modules/services/serviceWorkflow/mainServiceWorkflow.py
@@ -3,6 +3,7 @@ import uuid
from typing import Dict, Any, List, Optional
from modules.datamodels.datamodelUam import User, UserConnection
from modules.datamodels.datamodelChat import ChatDocument, ChatMessage, ChatStat, ChatLog
+from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
from modules.security.tokenManager import TokenManager
from modules.shared.progressLogger import ProgressLogger
@@ -57,17 +58,18 @@ LOOP_INSTRUCTION
Please provide a comprehensive summary of this conversation."""
# Get summary using AI service through proper main service interface
+
return await self.services.ai.callAiDocuments(
prompt=prompt,
documents=None,
- options={
- "process_type": "text",
- "operation_type": "generate",
- "priority": "speed",
- "compress_prompt": True,
- "compress_documents": False,
- "max_cost": 0.01
- }
+ options=AiCallOptions(
+ operationType=OperationTypeEnum.DATA_GENERATE,
+ priority=PriorityEnum.SPEED,
+ processingMode=ProcessingModeEnum.BASIC,
+ compressPrompt=True,
+ compressContext=False,
+ maxCost=0.01
+ )
)
except Exception as e:
diff --git a/modules/workflows/methods/methodAi.py b/modules/workflows/methods/methodAi.py
index 34bede22..c5122c29 100644
--- a/modules/workflows/methods/methodAi.py
+++ b/modules/workflows/methods/methodAi.py
@@ -10,7 +10,7 @@ from datetime import datetime, UTC
from modules.workflows.methods.methodBase import MethodBase, action
from modules.datamodels.datamodelChat import ActionResult
-from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum, PriorityEnum, ProcessingModeEnum
+from modules.datamodels.datamodelAi import AiCallOptions
from modules.datamodels.datamodelChat import ChatDocument
from modules.aicore.aicorePluginTavily import WebResearchRequest
@@ -40,10 +40,6 @@ class MethodAi(MethodBase):
- aiPrompt (str, required): Instruction for the AI.
- documentList (list, optional): Document reference(s) for context.
- resultType (str, optional): Output file extension - only one extension allowed (e.g. txt, json, md, csv, xml, html, pdf, docx, xlsx, png, ...). Default: txt.
- - processingMode (str, optional): basic | advanced | detailed. Default: basic.
- - priority (str, optional): speed | quality | cost | balanced. Default: balanced.
- - maxCost (float, optional): Cost limit.
- - maxProcessingTime (int, optional): Time limit in seconds.
"""
try:
# Init progress logger
@@ -72,24 +68,6 @@ class MethodAi(MethodBase):
if isinstance(documentList, str):
documentList = [documentList]
resultType = parameters.get("resultType", "txt")
- processingModeStr = parameters.get("processingMode", "basic")
- priorityStr = parameters.get("priority", "balanced")
- maxCost = parameters.get("maxCost")
- maxProcessingTime = parameters.get("maxProcessingTime")
-
- # Dynamic operation type selection based on document presence
- if documentList and len(documentList) > 0:
- # With documents: default to dataExtract (document intelligence)
- operationType = OperationTypeEnum.DATA_EXTRACT
- logger.info(f"action.ai.processAuto-selected operationType EXTRACT (document intelligence mode - {len(documentList)} documents)")
- else:
- # Without documents: default to dataGenerate (content generation)
- operationType = OperationTypeEnum.DATA_GENERATE
- logger.info(f"action.ai.process Auto-selected operationType GENERATE (content generation mode - no documents)")
-
- # Map string parameters to enums using centralized utility function
- priority = self.services.utils.mapToEnum(PriorityEnum, priorityStr, PriorityEnum.BALANCED)
- processingMode = self.services.utils.mapToEnum(ProcessingModeEnum, processingModeStr, ProcessingModeEnum.BASIC)
if not aiPrompt:
@@ -117,25 +95,18 @@ class MethodAi(MethodBase):
# Update progress - preparing AI call
self.services.workflow.progressLogUpdate(operationId, 0.4, "Preparing AI call")
- # Build options and delegate document handling to AI/Extraction/Generation services
+ # Build options with only resultFormat - let service layer handle all other parameters
output_format = output_extension.replace('.', '') or 'txt'
options = AiCallOptions(
- operationType=operationType,
- priority=priority,
- compressPrompt=processingMode != ProcessingModeEnum.DETAILED,
- compressContext=True,
- processDocumentsIndividually=True,
- processingMode=processingMode,
- resultFormat=output_format,
- maxCost=maxCost,
- maxProcessingTime=maxProcessingTime,
+ resultFormat=output_format
+ # Removed all model parameters - service layer will analyze prompt and determine optimal parameters
)
# Update progress - calling AI
self.services.workflow.progressLogUpdate(operationId, 0.6, "Calling AI")
result = await self.services.ai.callAiDocuments(
- prompt=aiPrompt, # Use original prompt, let unified generation handle prompt building
+ prompt=aiPrompt,
documents=chatDocuments if chatDocuments else None,
options=options,
outputFormat=output_format
diff --git a/modules/workflows/processing/adaptive/contentValidator.py b/modules/workflows/processing/adaptive/contentValidator.py
index 1b28e752..66c44b1e 100644
--- a/modules/workflows/processing/adaptive/contentValidator.py
+++ b/modules/workflows/processing/adaptive/contentValidator.py
@@ -116,14 +116,9 @@ DELIVERED CONTENT TO CHECK:
"""
# Call AI service for validation
- from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum
- request_options = AiCallOptions()
- request_options.operationType = OperationTypeEnum.DATA_ANALYSE
-
response = await self.services.ai.callAiPlanning(
prompt=validationPrompt,
- placeholders=None,
- options=request_options
+ placeholders=None
)
# No retries or correction prompts here; parse-or-fail below
diff --git a/modules/workflows/processing/adaptive/intentAnalyzer.py b/modules/workflows/processing/adaptive/intentAnalyzer.py
index ba9629b1..324549b3 100644
--- a/modules/workflows/processing/adaptive/intentAnalyzer.py
+++ b/modules/workflows/processing/adaptive/intentAnalyzer.py
@@ -59,14 +59,9 @@ CRITICAL: Respond with ONLY the JSON object below. Do not include any explanator
"""
# Call AI service for analysis
- from modules.datamodels.datamodelAi import AiCallOptions, OperationTypeEnum
- request_options = AiCallOptions()
- request_options.operationType = OperationTypeEnum.DATA_ANALYSE
-
response = await self.services.ai.callAiPlanning(
prompt=analysisPrompt,
- placeholders=None,
- options=request_options
+ placeholders=None
)
# No retries or correction prompts here; parse-or-fail below
diff --git a/modules/workflows/processing/core/taskPlanner.py b/modules/workflows/processing/core/taskPlanner.py
index 6738f9a2..9d1091bc 100644
--- a/modules/workflows/processing/core/taskPlanner.py
+++ b/modules/workflows/processing/core/taskPlanner.py
@@ -107,9 +107,7 @@ class TaskPlanner:
prompt = await self.services.ai.callAiPlanning(
prompt=taskPlanningPromptTemplate,
- placeholders=placeholders,
- options=options,
- loopInstructionFormat="json"
+ placeholders=placeholders
)
# Check if AI response is valid
diff --git a/modules/workflows/processing/modes/modeActionplan.py b/modules/workflows/processing/modes/modeActionplan.py
index 632ae138..9a54c43e 100644
--- a/modules/workflows/processing/modes/modeActionplan.py
+++ b/modules/workflows/processing/modes/modeActionplan.py
@@ -134,7 +134,7 @@ class ActionplanMode(BaseMode):
maxProcessingTime=30
)
- prompt = await self.services.ai.callAiPlanning(prompt=actionPromptTemplate, placeholders=placeholders, options=options)
+ prompt = await self.services.ai.callAiPlanning(prompt=actionPromptTemplate, placeholders=placeholders)
# Check if AI response is valid
if not prompt:
@@ -466,7 +466,7 @@ class ActionplanMode(BaseMode):
maxProcessingTime=30
)
- response = await self.services.ai.callAiPlanning(prompt=promptTemplate, placeholders=placeholders, options=options)
+ response = await self.services.ai.callAiPlanning(prompt=promptTemplate, placeholders=placeholders)
# Log result review response received
logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")
diff --git a/modules/workflows/processing/modes/modeReact.py b/modules/workflows/processing/modes/modeReact.py
index 1acd8152..33686561 100644
--- a/modules/workflows/processing/modes/modeReact.py
+++ b/modules/workflows/processing/modes/modeReact.py
@@ -185,21 +185,10 @@ class ReactMode(BaseMode):
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
- # Centralized AI call for plan selection (use plan generation quality)
- options = AiCallOptions(
- operationType=OperationTypeEnum.PLAN,
- priority=PriorityEnum.QUALITY,
- compressPrompt=False,
- compressContext=False,
- processingMode=ProcessingModeEnum.DETAILED,
- maxCost=0.10,
- maxProcessingTime=30
- )
-
+ # Centralized AI call for plan selection (uses static planning parameters)
response = await self.services.ai.callAiPlanning(
prompt=promptTemplate,
- placeholders=placeholders,
- options=options
+ placeholders=placeholders
)
jsonStart = response.find('{') if response else -1
jsonEnd = response.rfind('}') + 1 if response else 0
@@ -294,24 +283,10 @@ class ReactMode(BaseMode):
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
- # Centralized AI call for parameter suggestion (balanced analysis)
- options = AiCallOptions(
- operationType=OperationTypeEnum.DATA_ANALYSE,
- priority=PriorityEnum.BALANCED,
- compressPrompt=True,
- compressContext=False,
- processingMode=ProcessingModeEnum.ADVANCED,
- maxCost=0.05,
- maxProcessingTime=30,
- temperature=0.3, # Slightly higher temperature for better instruction following
- # max tokens not set - use model's maximum for big JSON responses
- resultFormat="json" # Explicitly request JSON format
- )
-
+ # Centralized AI call for parameter suggestion (uses static planning parameters)
paramsResp = await self.services.ai.callAiPlanning(
prompt=promptTemplate,
- placeholders=placeholders,
- options=options
+ placeholders=placeholders
)
# Parse JSON response
js = paramsResp[paramsResp.find('{'):paramsResp.rfind('}')+1] if paramsResp else '{}'
@@ -609,21 +584,10 @@ class ReactMode(BaseMode):
promptTemplate = bundle.prompt
placeholders = bundle.placeholders
- # Centralized AI call for refinement decision (balanced analysis)
- options = AiCallOptions(
- operationType=OperationTypeEnum.DATA_ANALYSE,
- priority=PriorityEnum.BALANCED,
- compressPrompt=True,
- compressContext=False,
- processingMode=ProcessingModeEnum.ADVANCED,
- maxCost=0.05,
- maxProcessingTime=30
- )
-
+ # Centralized AI call for refinement decision (uses static planning parameters)
resp = await self.services.ai.callAiPlanning(
prompt=promptTemplate,
- placeholders=placeholders,
- options=options
+ placeholders=placeholders
)
# More robust JSON extraction
@@ -716,14 +680,7 @@ Return only the user-friendly message, no technical details."""
# Call AI to generate user-friendly message
response = await self.services.ai.callAiPlanning(
prompt=prompt,
- placeholders=None,
- options=AiCallOptions(
- operationType=OperationTypeEnum.DATA_GENERATE,
- priority=PriorityEnum.SPEED,
- compressPrompt=True,
- maxCost=0.01,
- maxProcessingTime=5
- )
+ placeholders=None
)
return response.strip() if response else f"Executing {method}.{actionName} action..."
@@ -757,14 +714,7 @@ Return only the user-friendly message, no technical details."""
# Call AI to generate user-friendly result message
response = await self.services.ai.callAiPlanning(
prompt=prompt,
- placeholders=None,
- options=AiCallOptions(
- operationType=OperationTypeEnum.DATA_GENERATE,
- priority=PriorityEnum.SPEED,
- compressPrompt=True,
- maxCost=0.01,
- maxProcessingTime=5
- )
+ placeholders=None
)
return response.strip() if response else f"{method}.{actionName} action completed"
diff --git a/modules/workflows/workflowManager.py b/modules/workflows/workflowManager.py
index f32f8ad4..d52365ef 100644
--- a/modules/workflows/workflowManager.py
+++ b/modules/workflows/workflowManager.py
@@ -218,8 +218,8 @@ class WorkflowManager:
f"User message:\n{self.services.ai.sanitizePromptContent(userInput.prompt, 'userinput')}"
)
- # Call AI analyzer
- aiResponse = await self.services.ai.callAiPlanning(prompt=analyzerPrompt, placeholders=None, options=None)
+ # Call AI analyzer (planning call - will use static parameters)
+ aiResponse = await self.services.ai.callAiPlanning(prompt=analyzerPrompt, placeholders=None)
detectedLanguage = None
normalizedRequest = None
diff --git a/test_operation_type_ratings.py b/test_operation_type_ratings.py
index 907f9fd8..e39f4486 100644
--- a/test_operation_type_ratings.py
+++ b/test_operation_type_ratings.py
@@ -10,7 +10,10 @@ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from modules.datamodels.datamodelAi import OperationTypeEnum, createOperationTypeRatings, AiCallOptions, PriorityEnum, ProcessingModeEnum
from modules.aicore.aicorePluginPerplexity import AiPerplexity
-from modules.aicore.aicorePluginTavily import AiTavily
+from modules.aicore.aicorePluginTavily import ConnectorWeb
+from modules.aicore.aicorePluginAnthropic import AiAnthropic
+from modules.aicore.aicorePluginOpenai import AiOpenai
+from modules.aicore.aicorePluginInternal import AiInternal
from modules.aicore.aicoreModelSelector import ModelSelector
def testOperationTypeRatings():
@@ -20,11 +23,15 @@ def testOperationTypeRatings():
# Initialize connectors
perplexity = AiPerplexity()
- tavily = AiTavily()
+ tavily = ConnectorWeb()
+ anthropic = AiAnthropic()
+ openai = AiOpenai()
+ internal = AiInternal()
modelSelector = ModelSelector()
# Get all models
- allModels = perplexity.getModels() + tavily.getModels()
+ allModels = (perplexity.getModels() + tavily.getModels() +
+ anthropic.getModels() + openai.getModels() + internal.getModels())
print(f"📊 Total models available: {len(allModels)}")
print()
@@ -35,7 +42,10 @@ def testOperationTypeRatings():
(OperationTypeEnum.WEB_NEWS, "Web News"),
(OperationTypeEnum.WEB_QUESTIONS, "Web Questions"),
(OperationTypeEnum.WEB_SEARCH, "Web Search"),
- (OperationTypeEnum.DATA_ANALYSE, "Text Analysis tasks")
+ (OperationTypeEnum.DATA_ANALYSE, "Data Analysis tasks"),
+ (OperationTypeEnum.DATA_GENERATE, "Data Generation tasks"),
+ (OperationTypeEnum.DATA_EXTRACT, "Data Extraction tasks"),
+ (OperationTypeEnum.PLAN, "Planning tasks")
]
for operationType, description in testCases: