Centralized AI continuation agents

parent e368819b1b
commit 3b53889b7c

15 changed files with 262 additions and 232 deletions
@@ -69,6 +69,8 @@ class AiService:
     def coreAi(self):
         """Lazy initialization of core AI service."""
         if self._coreAi is None:
+            if self.aiObjects is None:
+                raise RuntimeError("AiService.aiObjects must be initialized before accessing coreAi. Use await AiService.create() or await service._ensureAiObjectsInitialized()")
             logger.info("Lazy initializing SubCoreAi...")
             self._coreAi = SubCoreAi(self.services, self.aiObjects)
         return self._coreAi
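A minimal, self-contained sketch of the fail-fast lazy initialization this hunk introduces. The classes below are stand-ins, not the project's `AiService`/`SubCoreAi`, and the async factory mentioned in the real error message is assumed rather than shown.

```python
# Stand-in classes illustrating the guarded lazy property pattern from the hunk above.

class SubCoreAi:
    def __init__(self, services, aiObjects):
        self.services = services
        self.aiObjects = aiObjects


class AiService:
    def __init__(self, services, aiObjects=None):
        self.services = services
        self.aiObjects = aiObjects  # normally set by an async initializer before use
        self._coreAi = None

    @property
    def coreAi(self):
        # Fail fast instead of constructing SubCoreAi with a None dependency.
        if self._coreAi is None:
            if self.aiObjects is None:
                raise RuntimeError("aiObjects must be initialized before accessing coreAi")
            self._coreAi = SubCoreAi(self.services, self.aiObjects)
        return self._coreAi


service = AiService(services=object())
try:
    service.coreAi            # raises: aiObjects was never initialized
except RuntimeError as err:
    print(err)

service.aiObjects = object()  # stands in for the initialized AI backend
print(type(service.coreAi).__name__)  # SubCoreAi, created lazily on first access
```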
@@ -153,6 +155,30 @@ class AiService:
         await self._ensureAiObjectsInitialized()
         return await self.webResearchService.webResearch(request)

+    # Core AI Methods - Delegating to SubCoreAi
+    async def callAiPlanning(
+        self,
+        prompt: str,
+        placeholders: Optional[List[PromptPlaceholder]] = None,
+        options: Optional[AiCallOptions] = None,
+        loopInstruction: Optional[str] = None
+    ) -> str:
+        """Planning AI call for task planning, action planning, action selection, etc."""
+        await self._ensureAiObjectsInitialized()
+        return await self.coreAi.callAiPlanning(prompt, placeholders, options, loopInstruction)
+
+    async def callAiDocuments(
+        self,
+        prompt: str,
+        documents: Optional[List[ChatDocument]] = None,
+        options: Optional[AiCallOptions] = None,
+        outputFormat: Optional[str] = None,
+        title: Optional[str] = None
+    ) -> Union[str, Dict[str, Any]]:
+        """Document generation AI call for all non-planning calls."""
+        await self._ensureAiObjectsInitialized()
+        return await self.coreAi.callAiDocuments(prompt, documents, options, outputFormat, title)
+
     def sanitizePromptContent(self, content: str, contentType: str = "text") -> str:
         """
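A hedged usage sketch of the new facade methods: call sites elsewhere in this commit switch from reaching into `coreAi` to going through the service. The `services` wiring and argument values below are illustrative assumptions, not code from the repository.

```python
# Illustrative callers; `services.ai` is assumed to be an initialized AiService.

async def plan_next_step(services) -> str:
    # Planning-style call; loopInstruction is optional and only drives the
    # continuation loop when the prompt contains a LOOP_INSTRUCTION placeholder.
    return await services.ai.callAiPlanning(
        prompt="Plan the next step.\n\nLOOP_INSTRUCTION",
        placeholders=None,
        options=None,
        loopInstruction="If you cannot finish, end with: [CONTINUE: what remains]",
    )

async def generate_report(services, documents):
    # Document-generation call; returns a dict when outputFormat is set,
    # otherwise a plain string.
    return await services.ai.callAiDocuments(
        prompt="Create a short status report.",
        documents=documents,
        options=None,
        outputFormat="docx",
        title="Status Report",
    )
```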
@@ -2,7 +2,7 @@ import logging
 from typing import Dict, Any, List, Optional, Tuple, Union
 from modules.datamodels.datamodelChat import PromptPlaceholder, ChatDocument
 from modules.datamodels.datamodelAi import AiCallRequest, AiCallOptions, ModelCapabilities, OperationType, Priority
-from modules.interfaces.interfaceAiObjects import AiObjects
+from modules.shared.debugLogger import writeDebugFile

 logger = logging.getLogger(__name__)

@@ -25,7 +25,8 @@ class SubCoreAi:
         self,
         prompt: str,
         options: AiCallOptions,
-        debug_prefix: str = "ai_call"
+        debugPrefix: str = "ai_call",
+        loopInstruction: str = None
     ) -> str:
         """
         Shared core function for AI calls with looping system.
@@ -35,68 +36,85 @@ class SubCoreAi:
         Args:
             prompt: The prompt to send to AI
             options: AI call configuration options
-            debug_prefix: Prefix for debug file names
+            debugPrefix: Prefix for debug file names
+            loopInstruction: If provided, replaces LOOP_INSTRUCTION placeholder and includes in continuation prompts

         Returns:
             Complete AI response after all iterations
         """
-        max_iterations = 10  # Prevent infinite loops
+        max_iterations = 100  # Prevent infinite loops
         iteration = 0
-        accumulated_content = []
+        accumulatedContent = []

-        logger.info(f"Starting AI call with looping (debug prefix: {debug_prefix})")
+        logger.debug(f"Starting AI call with looping (debug prefix: {debugPrefix}, loopInstruction: {loopInstruction is not None})")

-        # Write initial prompt to debug file
-        from modules.shared.debugLogger import writeDebugFile
-        writeDebugFile(prompt, f"{debug_prefix}_prompt", None)
+        # Import debug logger for use in iterations
+        # Store original prompt to preserve LOOP_INSTRUCTION placeholder
+        originalPrompt = prompt
+
+        # Handle LOOP_INSTRUCTION placeholder replacement for first iteration
+        if loopInstruction and iteration == 0:
+            if "LOOP_INSTRUCTION" not in prompt:
+                raise ValueError("LOOP_INSTRUCTION placeholder not found in prompt when loopInstruction provided")
+            prompt = prompt.replace("LOOP_INSTRUCTION", loopInstruction)
+            logger.debug("Replaced LOOP_INSTRUCTION placeholder with provided instruction")

         while iteration < max_iterations:
             iteration += 1
-            logger.info(f"AI call iteration {iteration}/{max_iterations}")
+            logger.debug(f"AI call iteration {iteration}/{max_iterations}")

             # Build iteration prompt
             if iteration == 1:
-                iteration_prompt = prompt
+                iterationPrompt = prompt
+            elif loopInstruction and iteration > 1:
+                # Only use continuation logic if loopInstruction is provided
+                iterationPrompt = self._buildContinuationPrompt(originalPrompt, accumulatedContent, iteration, loopInstruction)
             else:
-                iteration_prompt = self._buildContinuationPrompt(prompt, accumulated_content, iteration)
+                # No looping - use original prompt
+                iterationPrompt = prompt

             # Make AI call
             try:
                 from modules.datamodels.datamodelAi import AiCallRequest
                 request = AiCallRequest(
-                    prompt=iteration_prompt,
+                    prompt=iterationPrompt,
                     context="",
                     options=options
                 )

+                # Write the ACTUAL prompt sent to AI (including continuation context)
+                writeDebugFile(iterationPrompt, f"{debugPrefix}_prompt_iteration_{iteration}", None)
+
                 response = await self.aiObjects.call(request)
                 result = response.content

                 # Write raw AI response to debug file
-                writeDebugFile(result, f"{debug_prefix}_response_iteration_{iteration}", None)
+                writeDebugFile(result, f"{debugPrefix}_response_iteration_{iteration}", None)

                 # Emit stats for this iteration
                 self.services.workflow.storeWorkflowStat(
                     self.services.currentWorkflow,
                     response,
-                    f"ai.call.{debug_prefix}.iteration_{iteration}"
+                    f"ai.call.{debugPrefix}.iteration_{iteration}"
                 )

                 if not result or not result.strip():
                     logger.warning(f"Iteration {iteration}: Empty response, stopping")
                     break

-                # Check if this is a continuation response
-                if "[CONTINUE:" in result:
+                # Check if this is a continuation response (only if loopInstruction is provided)
+                if loopInstruction and "[CONTINUE:" in result:
                     # Extract the content before the continuation marker
-                    content_part = result.split("[CONTINUE:")[0].strip()
-                    if content_part:
-                        accumulated_content.append(content_part)
-                    logger.info(f"Iteration {iteration}: Continuation detected, continuing...")
+                    contentPart = result.split("[CONTINUE:")[0].strip()
+                    if contentPart:
+                        accumulatedContent.append(contentPart)
+                    logger.debug(f"Iteration {iteration}: Continuation detected, continuing...")
                     continue
                 else:
                     # This is the final response
-                    accumulated_content.append(result)
-                    logger.info(f"Iteration {iteration}: Final response received")
+                    accumulatedContent.append(result)
+                    logger.debug(f"Iteration {iteration}: Final response received")
                     break

             except Exception as e:
@@ -107,19 +125,20 @@ class SubCoreAi:
         logger.warning(f"AI call stopped after maximum iterations ({max_iterations})")

         # Combine all accumulated content
-        final_result = "\n\n".join(accumulated_content) if accumulated_content else ""
+        final_result = "\n\n".join(accumulatedContent) if accumulatedContent else ""

         # Write final result to debug file
-        writeDebugFile(final_result, f"{debug_prefix}_final_result", None)
+        writeDebugFile(final_result, f"{debugPrefix}_final_result", None)

-        logger.info(f"AI call completed: {len(accumulated_content)} parts from {iteration} iterations")
+        logger.info(f"AI call completed: {len(accumulatedContent)} parts from {iteration} iterations")
         return final_result

     def _buildContinuationPrompt(
         self,
         base_prompt: str,
-        accumulated_content: List[str],
-        iteration: int
+        accumulatedContent: List[str],
+        iteration: int,
+        loopInstruction: str = None
     ) -> str:
         """
         Build a prompt for continuation iterations.
@@ -132,11 +151,11 @@ You are continuing from a previous response. Please continue generating content
 IMPORTANT:
 - Continue from the exact point where you stopped
 - Maintain the same format and structure
-- If you cannot complete the full response, end with: [CONTINUE: brief description of what still needs to be generated]
+- {loopInstruction if loopInstruction else "If you cannot complete the full response, end with: [CONTINUE: brief description of what still needs to be generated]"}
 - Only stop when the response is completely generated

 Previous content generated:
-{chr(10).join(accumulated_content[-1:]) if accumulated_content else "None"}
+{chr(10).join(accumulatedContent[-1:]) if accumulatedContent else "None"}

 Continue generating content now:
 """
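A self-contained sketch of the continuation protocol the two hunks above implement: responses ending in a `[CONTINUE: ...]` marker are truncated at the marker, accumulated, and the loop asks for more; anything else is treated as the final chunk. The `call_model` callable below is a fake stand-in for the real `aiObjects.call()`.

```python
from typing import Callable, List

def run_with_continuations(call_model: Callable[[int], str], max_iterations: int = 100) -> str:
    accumulated: List[str] = []
    for iteration in range(1, max_iterations + 1):
        result = call_model(iteration)
        if not result or not result.strip():
            break                               # empty response: stop
        if "[CONTINUE:" in result:
            part = result.split("[CONTINUE:")[0].strip()
            if part:
                accumulated.append(part)        # keep the partial content
            continue                            # ask the model to keep going
        accumulated.append(result)              # final response received
        break
    return "\n\n".join(accumulated)

# Fake model: two partial chunks, then a final one.
replies = {
    1: "Chapter 1 ... [CONTINUE: chapter 2 pending]",
    2: "Chapter 2 ... [CONTINUE: chapter 3 pending]",
    3: "Chapter 3, the end.",
}
print(run_with_continuations(lambda i: replies.get(i, "")))
```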
@@ -194,7 +213,8 @@ Continue generating content now:
         self,
         prompt: str,
         placeholders: Optional[List[PromptPlaceholder]] = None,
-        options: Optional[AiCallOptions] = None
+        options: Optional[AiCallOptions] = None,
+        loopInstruction: Optional[str] = None
     ) -> str:
         """
         Planning AI call for task planning, action planning, action selection, etc.
@@ -212,13 +232,13 @@ Continue generating content now:

         # Build full prompt with placeholders
         if placeholders:
-            placeholders_dict = {p.key: p.value for p in placeholders}
+            placeholders_dict = {p.label: p.content for p in placeholders}
             full_prompt = self._buildPromptWithPlaceholders(prompt, placeholders_dict)
         else:
             full_prompt = prompt

         # Use shared core function with planning-specific debug prefix
-        return await self._callAiWithLooping(full_prompt, options, "planning")
+        return await self._callAiWithLooping(full_prompt, options, "planning", loopInstruction=loopInstruction)

     # Document Generation AI Call
     async def callAiDocuments(
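The hunk above switches placeholder expansion from `p.key`/`p.value` to `p.label`/`p.content`. A stand-in dataclass (not the project's `PromptPlaceholder` datamodel) illustrating the shape the new comprehension expects:

```python
from dataclasses import dataclass

@dataclass
class PromptPlaceholderStub:
    label: str    # placeholder name, e.g. "USER_LANGUAGE"
    content: str  # text substituted into the prompt

placeholders = [
    PromptPlaceholderStub(label="USER_LANGUAGE", content="en"),
    PromptPlaceholderStub(label="USER_PROMPT", content="Summarize the attached report"),
]

# Mirrors the new line: placeholders_dict = {p.label: p.content for p in placeholders}
placeholders_dict = {p.label: p.content for p in placeholders}
print(placeholders_dict)
```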
@@ -227,9 +247,7 @@ Continue generating content now:
         documents: Optional[List[ChatDocument]] = None,
         options: Optional[AiCallOptions] = None,
         outputFormat: Optional[str] = None,
-        title: Optional[str] = None,
-        documentProcessor=None,
-        documentGenerator=None
+        title: Optional[str] = None
     ) -> Union[str, Dict[str, Any]]:
         """
         Document generation AI call for all non-planning calls.
@@ -241,8 +259,6 @@ Continue generating content now:
             options: AI call configuration options
             outputFormat: Optional output format for document generation
             title: Optional title for generated documents
-            documentProcessor: Document processing service instance
-            documentGenerator: Document generation service instance

         Returns:
             AI response as string, or dict with documents if outputFormat is specified
@@ -251,24 +267,16 @@ Continue generating content now:
             options = AiCallOptions()

         # Handle document generation with specific output format using unified approach
-        if outputFormat and documentGenerator:
+        if outputFormat:
             # Use unified generation method for all document generation
             if documents and len(documents) > 0:
-                # Extract content from documents first
                 logger.info(f"Extracting content from {len(documents)} documents")
-                extracted_content = await documentProcessor.callAiText(prompt, documents, options)
-                # Generate with extracted content using shared core function
-                generation_prompt = await self._buildGenerationPrompt(prompt, extracted_content, outputFormat, title)
-                generated_json = await self._callAiWithLooping(generation_prompt, options, "document_generation")
+                extracted_content = await self.services.ai.documentProcessor.callAiText(prompt, documents, options)
             else:
-                # Direct generation without documents
                 logger.info("No documents provided - using direct generation")
-                generation_prompt = await self._buildGenerationPrompt(prompt, None, outputFormat, title)
-                generated_json = await self._callAiWithLooping(generation_prompt, options, "document_generation")
-            # Write the generated JSON to debug file
-            from modules.shared.debugLogger import writeDebugFile
-            writeDebugFile(generated_json, "unified_generation_response", documents)
+                extracted_content = None
+            generation_prompt = await self._buildGenerationPrompt(prompt, extracted_content, outputFormat, title)
+            generated_json = await self._callAiWithLooping(generation_prompt, options, "document_generation", loopInstruction="If you cannot complete the full response, end with: [CONTINUE: brief description of what still needs to be generated]")

             # Parse the generated JSON
             try:
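The generation path above ends by parsing `generated_json`, but the diff is cut off before that parse. A defensive sketch of what such a parse step might look like, assuming the model is asked for the `{"metadata": ..., "documents": [...]}` structure used by the generation prompt later in this commit; this is an illustration, not the project's parser.

```python
import json

def parse_generated_json(generated_json: str) -> dict:
    # Reject non-JSON output and missing top-level keys instead of raising later.
    try:
        parsed = json.loads(generated_json)
    except json.JSONDecodeError as err:
        return {"success": False, "error": f"Invalid JSON from model: {err}"}
    if "documents" not in parsed:
        return {"success": False, "error": "Missing 'documents' key in generated JSON"}
    return {"success": True, "data": parsed}

print(parse_generated_json('{"metadata": {}, "documents": []}'))
print(parse_generated_json("not json"))
```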
@@ -313,7 +321,6 @@ Continue generating content now:

             # Log AI response for debugging
             try:
-                from modules.shared.debugLogger import writeDebugFile
                 writeDebugFile(str(result), "documentGenerationResponse", documents)
             except Exception:
                 pass
@@ -325,14 +332,14 @@ Continue generating content now:
                 return {"success": False, "error": f"Rendering failed: {str(e)}"}

         # Handle text calls (no output format specified)
-        if documents and documentProcessor:
+        if documents:
             # Use document processing for text calls with documents
-            result = await documentProcessor.callAiText(prompt, documents, options)
+            result = await self.services.ai.documentProcessor.callAiText(prompt, documents, options)
         else:
             # Use shared core function for direct text calls
-            result = await self._callAiWithLooping(prompt, options, "text")
+            result = await self._callAiWithLooping(prompt, options, "text", loopInstruction=None)

         return result


     # AI Image Analysis
@@ -448,7 +455,7 @@ Continue generating content now:




-
+    # TO CHECK FUNCTIONS TODO


@@ -195,75 +195,70 @@ Consider the user's intent and the most logical way to organize the extracted content.
     except Exception as e:
         services.utils.debugLogToFile(f"Generic prompt analysis failed: {str(e)}", "PROMPT_BUILDER")

-    # Fallback to single-file prompt
-    example_data = {
-        "metadata": {
-            "title": "Example Document",
-            "author": "AI Assistant",
-            "source_documents": ["document_001"],
-            "extraction_method": "ai_extraction"
-        },
-        "sections": [
-            {
-                "id": "section_001",
-                "content_type": "heading",
-                "elements": [
-                    {
-                        "level": 1,
-                        "text": "1. SECTION TITLE"
-                    }
-                ],
-                "order": 1,
-                "metadata": {}
-            }
-        ],
-        "summary": "",
-        "tags": []
-    }
-
-    return f"""
-{userPrompt}
-
-You are a document processing assistant that extracts and structures content from documents. Your task is to analyze the provided document content and create a structured JSON output.
-
-TASK: Extract the actual content from the document and organize it into structured sections.
-
-REQUIREMENTS:
-1. Analyze the document content provided in the context below
-2. Extract all content and organize it into logical sections
-3. Create structured JSON with sections containing the extracted content
-4. Preserve the original structure and data
-
-OUTPUT FORMAT: Return only valid JSON in this exact structure:
-{json.dumps(example_data, indent=2)}
-
-Requirements:
-- Preserve all original data - do not summarize or interpret
-- Use the exact JSON format shown above
-- Maintain data integrity and structure
-
-Content Types to Extract:
-1. Tables: Extract all rows and columns with proper headers
-2. Lists: Extract all items with proper nesting
-3. Headings: Extract with appropriate levels
-4. Paragraphs: Extract as structured text
-5. Code: Extract code blocks with language identification
-6. Images: Analyze images and describe all visible content including text, tables, logos, graphics, layout, and visual elements
-
-Image Analysis Requirements:
-- If you cannot analyze an image for any reason, explain why in the JSON response
-- Describe everything you see in the image
-- Include all text content, tables, logos, graphics, layout, and visual elements
-- If the image is too small, corrupted, or unclear, explain this
-- Always provide feedback - never return empty responses
-
-Return only the JSON structure with actual data from the documents. Do not include any text before or after the JSON.
-
-Extract the ACTUAL CONTENT from the source documents. Do not use placeholder text like "Section 1", "Section 2", etc. Extract the real headings, paragraphs, and content from the documents.
-
-DO NOT return a schema description - return actual extracted content in the JSON format shown above.
+    # Always use the proper generation prompt template with LOOP_INSTRUCTION
+    result = f"""You are an AI assistant that generates structured JSON content for document creation.
+
+USER REQUEST: "{userPrompt}"
+DOCUMENT TITLE: "{title}"
+TARGET FORMAT: {outputFormat}
+
+TASK: Generate JSON content that fulfills the user's request.
+
+CRITICAL: You MUST return ONLY valid JSON in this exact structure:
+{{
+  "metadata": {{
+    "title": "{title}",
+    "splitStrategy": "single_document",
+    "source_documents": [],
+    "extraction_method": "ai_generation"
+  }},
+  "documents": [
+    {{
+      "id": "doc_1",
+      "title": "{title}",
+      "filename": "document.{outputFormat}",
+      "sections": [
+        {{
+          "id": "section_1",
+          "content_type": "heading",
+          "elements": [
+            {{
+              "level": 1,
+              "text": "1. SECTION TITLE"
+            }}
+          ],
+          "order": 1
+        }},
+        {{
+          "id": "section_2",
+          "content_type": "paragraph",
+          "elements": [
+            {{
+              "text": "This is the actual content that should be generated."
+            }}
+          ],
+          "order": 2
+        }}
+      ]
+    }}
+  ]
+}}
+
+IMPORTANT:
+- Return ONLY the JSON structure above
+- Do NOT include any text before or after the JSON
+- Fill in the actual content based on the user request: {userPrompt}
+- If the content is too large, you can split it into multiple sections
+- Each section should have a unique id and appropriate content_type
+- LOOP_INSTRUCTION
 """
+
+    # Debug output
+    if services:
+        services.utils.debugLogToFile(f"GENERATION PROMPT: Generated successfully", "PROMPT_BUILDER")
+
+    return result.strip()

 async def buildExtractionPrompt(
     outputFormat: str,
     renderer: _RendererLike,
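How the literal `LOOP_INSTRUCTION` marker left in the template above is expected to be consumed: `_callAiWithLooping` (earlier in this commit) substitutes it on the first iteration. A simplified, standalone illustration with an assumed instruction string:

```python
template = (
    "TASK: Generate JSON content that fulfills the user's request.\n"
    "IMPORTANT:\n"
    "- Return ONLY the JSON structure above\n"
    "- LOOP_INSTRUCTION\n"
)

loop_instruction = (
    "If you cannot complete the full response, end with: "
    "[CONTINUE: brief description of what still needs to be generated]"
)

# Same guard as the looping helper: a missing marker is a programming error.
if "LOOP_INSTRUCTION" not in template:
    raise ValueError("LOOP_INSTRUCTION placeholder not found in prompt")

first_iteration_prompt = template.replace("LOOP_INSTRUCTION", loop_instruction)
print(first_iteration_prompt)
```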
@@ -499,6 +494,8 @@ IMPORTANT:
 - Fill in the actual content based on the user request: {safeUserPrompt}
 - If the content is too large, you can split it into multiple sections
 - Each section should have a unique id and appropriate content_type
+
+LOOP_INSTRUCTION
 """

     # Debug output
@@ -90,7 +90,7 @@ class NormalizationService:
            " \"Date\": {\"formats\": [\"DD.MM.YYYY\",\"YYYY-MM-DD\"]}\n }\n}\n"
        )

-        response = await self.services.ai.coreAi.callAiPlanning(prompt=prompt, placeholders=None, options=None)
+        response = await self.services.ai.callAiPlanning(prompt=prompt, placeholders=None, options=None)
        if not response:
            return {"mapping": {}, "normalizationPolicy": {}}

@@ -7,7 +7,6 @@ from modules.datamodels.datamodelChat import ChatContentExtracted
 from modules.services.serviceExtraction.mainServiceExtraction import ExtractionService
 from modules.services.serviceGeneration.subDocumentUtility import getFileExtension, getMimeTypeFromExtension, detectContentTypeFromData
 from modules.shared.timezoneUtils import get_utc_timestamp
-from modules.services.serviceAi.mainServiceAi import AiService
 from modules.security.tokenManager import TokenManager
 from modules.shared.progressLogger import ProgressLogger

@@ -43,23 +42,25 @@ class WorkflowService:
                    break

            # Create prompt for AI
-            prompt = f"""You are an AI assistant providing a summary of a chat conversation.
+            prompt = f"""
+You are an AI assistant providing a summary of a chat conversation.
 Please respond in '{self.user.language}' language.

 Chat History:
 {chr(10).join(f"- {msg.message}" for msg in reversed(relevantMessages))}

 Instructions:
 1. Summarize the conversation's key points and outcomes
 2. Be concise but informative
 3. Use a professional but friendly tone
 4. Focus on important decisions and next steps if any

+LOOP_INSTRUCTION
+
 Please provide a comprehensive summary of this conversation."""

-            # Get summary using AI service directly (avoiding circular dependency)
-            ai_service = AiService(self)
-            return await ai_service.coreAi.callAiDocuments(
+            # Get summary using AI service through proper main service interface
+            return await self.services.ai.callAiDocuments(
                prompt=prompt,
                documents=None,
                options={
@@ -69,9 +70,7 @@ class WorkflowService:
                    "compress_prompt": True,
                    "compress_documents": False,
                    "max_cost": 0.01
-                },
-                documentProcessor=ai_service.documentProcessor,
-                documentGenerator=ai_service.documentGenerator
+                }
            )

        except Exception as e:
@@ -127,13 +127,11 @@ class MethodAi(MethodBase):
            # Update progress - calling AI
            progressLogger.updateProgress(operationId, 0.6, "Calling AI")

-            result = await self.services.ai.coreAi.callAiDocuments(
+            result = await self.services.ai.callAiDocuments(
                prompt=aiPrompt,  # Use original prompt, let unified generation handle prompt building
                documents=chatDocuments if chatDocuments else None,
                options=options,
-                outputFormat=output_format,
-                documentProcessor=self.services.ai.documentProcessor,
-                documentGenerator=self.services.ai.documentGenerator
+                outputFormat=output_format
            )

            # Update progress - processing result
@@ -1182,11 +1182,13 @@ Return JSON:
   "subject": "subject line",
   "body": "email body (HTML allowed)",
   "attachments": ["doc_ref1", "doc_ref2"]
-}}"""
+}}
+
+LOOP_INSTRUCTION"""

        # Call AI service to generate email content
        try:
-            ai_response = await self.services.ai.coreAi.callAiDocuments(
+            ai_response = await self.services.ai.callAiDocuments(
                prompt=ai_prompt,
                documents=chatDocuments,
                options=AiCallOptions(
@@ -1199,9 +1201,7 @@ Return JSON:
                    resultFormat="json",
                    maxCost=0.50,
                    maxProcessingTime=30
-                ),
-                documentProcessor=self.services.ai.documentProcessor,
-                documentGenerator=self.services.ai.documentGenerator
+                )
            )

            # Parse AI response
@@ -120,7 +120,7 @@ DELIVERED CONTENT TO CHECK:
        request_options = AiCallOptions()
        request_options.operationType = OperationType.GENERAL

-        response = await self.services.ai.coreAi.callAiPlanning(
+        response = await self.services.ai.callAiPlanning(
            prompt=validationPrompt,
            placeholders=None,
            options=request_options
@@ -63,7 +63,7 @@ CRITICAL: Respond with ONLY the JSON object below. Do not include any explanator
        request_options = AiCallOptions()
        request_options.operationType = OperationType.GENERAL

-        response = await self.services.ai.coreAi.callAiPlanning(
+        response = await self.services.ai.callAiPlanning(
            prompt=analysisPrompt,
            placeholders=None,
            options=request_options
@@ -105,7 +105,7 @@ class TaskPlanner:
            maxProcessingTime=30
        )

-        prompt = await self.services.ai.coreAi.callAiPlanning(
+        prompt = await self.services.ai.callAiPlanning(
            prompt=taskPlanningPromptTemplate,
            placeholders=placeholders,
            options=options
@@ -137,7 +137,7 @@ class ActionplanMode(BaseMode):
            maxProcessingTime=30
        )

-        prompt = await self.services.ai.coreAi.callAiPlanning(prompt=actionPromptTemplate, placeholders=placeholders, options=options)
+        prompt = await self.services.ai.callAiPlanning(prompt=actionPromptTemplate, placeholders=placeholders, options=options)

        # Check if AI response is valid
        if not prompt:
@@ -476,7 +476,7 @@ class ActionplanMode(BaseMode):
            maxProcessingTime=30
        )

-        response = await self.services.ai.coreAi.callAiPlanning(prompt=promptTemplate, placeholders=placeholders, options=options)
+        response = await self.services.ai.callAiPlanning(prompt=promptTemplate, placeholders=placeholders, options=options)

        # Log result review response received
        logger.info("=== RESULT REVIEW AI RESPONSE RECEIVED ===")
@@ -201,7 +201,7 @@ class ReactMode(BaseMode):
            maxProcessingTime=30
        )

-        response = await self.services.ai.coreAi.callAiPlanning(
+        response = await self.services.ai.callAiPlanning(
            prompt=promptTemplate,
            placeholders=placeholders,
            options=options
@@ -313,7 +313,7 @@ class ReactMode(BaseMode):
            resultFormat="json"  # Explicitly request JSON format
        )

-        paramsResp = await self.services.ai.coreAi.callAiPlanning(
+        paramsResp = await self.services.ai.callAiPlanning(
            prompt=promptTemplate,
            placeholders=placeholders,
            options=options
@@ -625,7 +625,7 @@ class ReactMode(BaseMode):
            maxProcessingTime=30
        )

-        resp = await self.services.ai.coreAi.callAiPlanning(
+        resp = await self.services.ai.callAiPlanning(
            prompt=promptTemplate,
            placeholders=placeholders,
            options=options
@@ -719,7 +719,7 @@ User language: {userLanguage}
 Return only the user-friendly message, no technical details."""

        # Call AI to generate user-friendly message
-        response = await self.services.ai.coreAi.callAiPlanning(
+        response = await self.services.ai.callAiPlanning(
            prompt=prompt,
            placeholders=None,
            options=AiCallOptions(
@@ -760,7 +760,7 @@ Result context: {resultContext}
 Return only the user-friendly message, no technical details."""

        # Call AI to generate user-friendly result message
-        response = await self.services.ai.coreAi.callAiPlanning(
+        response = await self.services.ai.callAiPlanning(
            prompt=prompt,
            placeholders=None,
            options=AiCallOptions(
||||||
|
|
@ -32,98 +32,100 @@ def generateActionDefinitionPrompt(services, context: Any) -> PromptBundle:
|
||||||
|
|
||||||
template = """# Action Definition
|
template = """# Action Definition
|
||||||
|
|
||||||
Generate the next action to advance toward completing the task objective.
|
Generate the next action to advance toward completing the task objective.
|
||||||
|
|
||||||
## 📋 Context
|
## 📋 Context
|
||||||
|
|
||||||
### User Language
|
### User Language
|
||||||
{{KEY:USER_LANGUAGE}}
|
{{KEY:USER_LANGUAGE}}
|
||||||
|
|
||||||
### Task Objective
|
### Task Objective
|
||||||
{{KEY:USER_PROMPT}}
|
{{KEY:USER_PROMPT}}
|
||||||
|
|
||||||
### Available Documents
|
### Available Documents
|
||||||
{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
|
{{KEY:AVAILABLE_DOCUMENTS_SUMMARY}}
|
||||||
|
|
||||||
### Available Connections
|
### Available Connections
|
||||||
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}
|
{{KEY:AVAILABLE_CONNECTIONS_INDEX}}
|
||||||
|
|
||||||
### Workflow History
|
### Workflow History
|
||||||
{{KEY:WORKFLOW_HISTORY}}
|
{{KEY:WORKFLOW_HISTORY}}
|
||||||
|
|
||||||
### Available Methods
|
### Available Methods
|
||||||
{{KEY:AVAILABLE_METHODS}}
|
{{KEY:AVAILABLE_METHODS}}
|
||||||
|
|
||||||
## ⚠️ RULES
|
## ⚠️ RULES
|
||||||
|
|
||||||
### Action Names
|
### Action Names
|
||||||
- **Use EXACT compound action names** from AVAILABLE_METHODS (e.g., "ai.process", "document.extract", "web.search")
|
- **Use EXACT compound action names** from AVAILABLE_METHODS (e.g., "ai.process", "document.extract", "web.search")
|
||||||
- **DO NOT create** new action names - only use those listed in AVAILABLE_METHODS
|
- **DO NOT create** new action names - only use those listed in AVAILABLE_METHODS
|
||||||
- **DO NOT separate** method and action names - use the full compound name
|
- **DO NOT separate** method and action names - use the full compound name
|
||||||
|
|
||||||
### Parameter Guidelines
|
### Parameter Guidelines
|
||||||
- **Use exact document references** from AVAILABLE_DOCUMENTS_INDEX
|
- **Use exact document references** from AVAILABLE_DOCUMENTS_INDEX
|
||||||
- **Use exact connection references** from AVAILABLE_CONNECTIONS_INDEX
|
- **Use exact connection references** from AVAILABLE_CONNECTIONS_INDEX
|
||||||
- **Include user language** if relevant
|
- **Include user language** if relevant
|
||||||
- **Avoid unnecessary fields** - host applies defaults
|
- **Avoid unnecessary fields** - host applies defaults
|
||||||
|
|
||||||
## 📊 Required JSON Structure
|
## 📊 Required JSON Structure
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"actions": [
|
"actions": [
|
||||||
{
|
{
|
||||||
"action": "method.action_name",
|
"action": "method.action_name",
|
||||||
"parameters": {},
|
"parameters": {},
|
||||||
"resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
|
"resultLabel": "round{current_round}_task{current_task}_action{action_number}_{descriptive_label}",
|
||||||
"description": "What this action accomplishes",
|
"description": "What this action accomplishes",
|
||||||
"userMessage": "User-friendly message in language '{{KEY:USER_LANGUAGE}}'"
|
"userMessage": "User-friendly message in language '{{KEY:USER_LANGUAGE}}'"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
## ✅ Correct Example
|
## ✅ Correct Example
|
||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
"actions": [
|
"actions": [
|
||||||
{
|
{
|
||||||
"action": "document.extract",
|
"action": "document.extract",
|
||||||
"parameters": {"documentList": ["docList:msg_123:results"]},
|
"parameters": {"documentList": ["docList:msg_123:results"]},
|
||||||
"resultLabel": "round1_task1_action1_extract_results",
|
"resultLabel": "round1_task1_action1_extract_results",
|
||||||
"description": "Extract data from documents",
|
"description": "Extract data from documents",
|
||||||
"userMessage": "Extracting data from documents"
|
"userMessage": "Extracting data from documents"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
## 🎯 Action Planning Guidelines
|
## 🎯 Action Planning Guidelines
|
||||||
|
|
||||||
### Method Selection
|
### Method Selection
|
||||||
- **Choose appropriate method** based on task requirements
|
- **Choose appropriate method** based on task requirements
|
||||||
- **Consider available resources** (documents, connections)
|
- **Consider available resources** (documents, connections)
|
||||||
- **Match method capabilities** to task objectives
|
- **Match method capabilities** to task objectives
|
||||||
|
|
||||||
### Parameter Design
|
### Parameter Design
|
||||||
- **Use ACTION SIGNATURE** to understand required parameters
|
- **Use ACTION SIGNATURE** to understand required parameters
|
||||||
- **Convert objective** into appropriate parameter values
|
- **Convert objective** into appropriate parameter values
|
||||||
- **Include all required parameters** for the action
|
- **Include all required parameters** for the action
|
||||||
|
|
||||||
### Result Labeling
|
### Result Labeling
|
||||||
- **Use descriptive labels** that explain what the action produces
|
- **Use descriptive labels** that explain what the action produces
|
||||||
- **Follow naming convention**: `round{round}_task{task}_action{action}_{label}`
|
- **Follow naming convention**: `round{round}_task{task}_action{action}_{label}`
|
||||||
- **Make labels meaningful** for future reference
|
- **Make labels meaningful** for future reference
|
||||||
|
|
||||||
### User Messages
|
### User Messages
|
||||||
- **Write in user language:** '{{KEY:USER_LANGUAGE}}'
|
- **Write in user language:** '{{KEY:USER_LANGUAGE}}'
|
||||||
- **Explain what's happening** in user-friendly terms
|
- **Explain what's happening** in user-friendly terms
|
||||||
- **Keep messages concise** but informative
|
- **Keep messages concise** but informative
|
||||||
|
|
||||||
## 🚀 Response Format
|
## 🚀 Response Format
|
||||||
Return ONLY the JSON object."""
|
Return ONLY the JSON object with complete action objects. If you cannot complete the full response, ensure each action object is complete and valid.
|
||||||
|
LOOP_INSTRUCTION
|
||||||
|
"""
|
||||||
|
|
||||||
return PromptBundle(prompt=template, placeholders=placeholders)
|
return PromptBundle(prompt=template, placeholders=placeholders)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@@ -129,6 +129,7 @@ Break down user requests into logical, executable task steps.
 - **High**: Complex strategic tasks (6+ actions)

 ## 🚀 Response Format
-Return ONLY the JSON object."""
+Return ONLY the JSON object with complete task objects. If you cannot complete the full response, ensure each task object is complete and valid.
+LOOP_INSTRUCTION
+"""
     return PromptBundle(prompt=template, placeholders=placeholders)
||||||
|
|
@ -220,7 +220,7 @@ class WorkflowManager:
|
||||||
)
|
)
|
||||||
|
|
||||||
# Call AI analyzer
|
# Call AI analyzer
|
||||||
aiResponse = await self.services.ai.coreAi.callAiPlanning(prompt=analyzerPrompt, placeholders=None, options=None)
|
aiResponse = await self.services.ai.callAiPlanning(prompt=analyzerPrompt, placeholders=None, options=None)
|
||||||
|
|
||||||
detectedLanguage = None
|
detectedLanguage = None
|
||||||
normalizedRequest = None
|
normalizedRequest = None
|
||||||
|
|
|
||||||