ValueOn Lead to Offer durchgespielt, bugfixes in Dateigenerierung und ai nodes

This commit is contained in:
Ida 2026-05-03 18:01:10 +02:00
parent f96325f804
commit e6ca6a9d8e
24 changed files with 564 additions and 76 deletions

View file

@ -351,6 +351,7 @@ class AiAnthropic(BaseConnectorAi):
# Parse response # Parse response
anthropicResponse = response.json() anthropicResponse = response.json()
stop_reason = anthropicResponse.get("stop_reason")
# Extract content and tool_use blocks from response # Extract content and tool_use blocks from response
content = "" content = ""
@ -374,9 +375,25 @@ class AiAnthropic(BaseConnectorAi):
if not content and not toolCalls: if not content and not toolCalls:
logger.warning(f"Anthropic API returned empty content. Full response: {anthropicResponse}") logger.warning(f"Anthropic API returned empty content. Full response: {anthropicResponse}")
content = "[Anthropic API returned empty response]" err = (
"Anthropic refused the request (content policy) — try another model or adjust the prompt."
if stop_reason == "refusal"
else f"Anthropic returned no assistant text (stop_reason={stop_reason or 'unknown'})."
)
return AiModelResponse(
content="",
success=False,
error=err,
modelId=model.name,
metadata={
"response_id": anthropicResponse.get("id", ""),
"stop_reason": stop_reason,
},
)
metadata = {"response_id": anthropicResponse.get("id", "")} metadata = {"response_id": anthropicResponse.get("id", "")}
if stop_reason:
metadata["stop_reason"] = stop_reason
if toolCalls: if toolCalls:
metadata["toolCalls"] = toolCalls metadata["toolCalls"] = toolCalls
@ -492,6 +509,19 @@ class AiAnthropic(BaseConnectorAi):
f"Anthropic stream returned empty response: model={model.name}, " f"Anthropic stream returned empty response: model={model.name}, "
f"stopReason={stopReason}" f"stopReason={stopReason}"
) )
err = (
"Anthropic refused the request (content policy) — try another model or adjust the prompt."
if stopReason == "refusal"
else f"Anthropic returned no assistant text (stop_reason={stopReason or 'unknown'})."
)
yield AiModelResponse(
content="",
success=False,
error=err,
modelId=model.name,
metadata={"stopReason": stopReason} if stopReason else {},
)
return
metadata: Dict[str, Any] = {} metadata: Dict[str, Any] = {}
if stopReason: if stopReason:

View file

@ -834,7 +834,10 @@ class DatabaseConnector:
createdTs = record.get("sysCreatedAt") createdTs = record.get("sysCreatedAt")
if createdTs is None or createdTs == 0 or createdTs == 0.0: if createdTs is None or createdTs == 0 or createdTs == 0.0:
record["sysCreatedAt"] = currentTime record["sysCreatedAt"] = currentTime
if effective_user_id: # Do not wipe caller-provided sysCreatedBy (e.g. FileItem from createFile with
# real user). ContextVar can be "system" for the DB pool while the business
# user is set on the record from model_dump().
if effective_user_id and not record.get("sysCreatedBy"):
record["sysCreatedBy"] = effective_user_id record["sysCreatedBy"] = effective_user_id
elif not record.get("sysCreatedBy"): elif not record.get("sysCreatedBy"):
if effective_user_id: if effective_user_id:
@ -1531,7 +1534,7 @@ class DatabaseConnector:
createdTs = rec.get("sysCreatedAt") createdTs = rec.get("sysCreatedAt")
if createdTs is None or createdTs == 0 or createdTs == 0.0: if createdTs is None or createdTs == 0 or createdTs == 0.0:
rec["sysCreatedAt"] = currentTime rec["sysCreatedAt"] = currentTime
if effectiveUserId: if effectiveUserId and not rec.get("sysCreatedBy"):
rec["sysCreatedBy"] = effectiveUserId rec["sysCreatedBy"] = effectiveUserId
elif not rec.get("sysCreatedBy") and effectiveUserId: elif not rec.get("sysCreatedBy") and effectiveUserId:
rec["sysCreatedBy"] = effectiveUserId rec["sysCreatedBy"] = effectiveUserId

View file

@ -132,6 +132,14 @@ AI_NODES = [
"parameters": [ "parameters": [
{"name": "prompt", "type": "str", "required": True, "frontendType": "textarea", {"name": "prompt", "type": "str", "required": True, "frontendType": "textarea",
"description": t("Generierungs-Prompt")}, "description": t("Generierungs-Prompt")},
{"name": "outputFormat", "type": "str", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["docx", "pdf", "txt", "html", "md"]},
"description": t("Ausgabeformat"), "default": "docx"},
{"name": "title", "type": "str", "required": False, "frontendType": "text",
"description": t("Dokumenttitel (Metadaten / Dateiname)"), "default": ""},
{"name": "documentType", "type": "str", "required": False, "frontendType": "select",
"frontendOptions": {"options": ["letter", "memo", "proposal", "contract", "report", "email"]},
"description": t("Dokumentart (Inhaltshinweis fuer die KI)"), "default": "proposal"},
{"name": "context", "type": "Any", "required": False, "frontendType": "contextBuilder", {"name": "context", "type": "Any", "required": False, "frontendType": "contextBuilder",
"description": t("Daten aus vorherigen Schritten"), "default": ""}, "description": t("Daten aus vorherigen Schritten"), "default": ""},
{"name": "documentList", "type": "DocumentList", "required": False, "frontendType": "hidden", {"name": "documentList", "type": "DocumentList", "required": False, "frontendType": "hidden",

View file

@ -28,7 +28,7 @@ FILE_NODES = [
], ],
"inputs": 1, "inputs": 1,
"outputs": 1, "outputs": 1,
"inputPorts": {0: {"accepts": ["AiResult", "TextResult", "Transit"]}}, "inputPorts": {0: {"accepts": ["AiResult", "TextResult", "Transit", "FormPayload"]}},
"outputPorts": {0: {"schema": "DocumentList"}}, "outputPorts": {0: {"schema": "DocumentList"}},
"meta": {"icon": "mdi-file-plus-outline", "color": "#2196F3", "usesAi": False}, "meta": {"icon": "mdi-file-plus-outline", "color": "#2196F3", "usesAi": False},
"_method": "file", "_method": "file",

View file

@ -221,9 +221,9 @@ PORT_TYPE_CATALOG: Dict[str, PortSchema] = {
PortField(name="prompt", type="str", PortField(name="prompt", type="str",
description="Prompt"), description="Prompt"),
PortField(name="response", type="str", PortField(name="response", type="str",
description="Antworttext"), description="Antworttext", recommended=True),
PortField(name="responseData", type="Dict", required=False, PortField(name="responseData", type="Dict", required=False,
description="Strukturierte Antwort"), description="Strukturierte Antwort (nur bei JSON-Ausgabe)"),
PortField(name="context", type="str", PortField(name="context", type="str",
description="Kontext"), description="Kontext"),
PortField(name="documents", type="List[Document]", PortField(name="documents", type="List[Document]",
@ -660,8 +660,11 @@ def normalizeToSchema(raw: Any, schemaName: str) -> Dict[str, Any]:
if not schema or schemaName == "Transit": if not schema or schemaName == "Transit":
return result return result
# Only default **required** fields. Optional fields stay absent so DataRefs / context
# resolution never pick a synthetic `{}` or `[]` (e.g. AiResult.responseData when the
# model returned plain text only).
for field in schema.fields: for field in schema.fields:
if field.name not in result: if field.name not in result and field.required:
result[field.name] = _defaultForType(field.type) result[field.name] = _defaultForType(field.type)
return result return result

View file

@ -115,6 +115,14 @@ class ComponentObjects:
# Update database context # Update database context
self.db.updateContext(self.userId) self.db.updateContext(self.userId)
def _effective_user_id(self) -> Optional[str]:
"""User id for audit + FileData writes; singleton hub may unset userId but keep currentUser."""
if self.userId:
return self.userId
if self.currentUser is not None:
return getattr(self.currentUser, "id", None)
return None
def __del__(self): def __del__(self):
"""Cleanup method to close database connection.""" """Cleanup method to close database connection."""
if hasattr(self, 'db') and self.db is not None: if hasattr(self, 'db') and self.db is not None:
@ -1379,9 +1387,30 @@ class ComponentObjects:
fileSize=fileSize, fileSize=fileSize,
fileHash=fileHash, fileHash=fileHash,
) )
# Ensure audit user is always stored: workflow/singleton contexts sometimes leave
# the connector without _current_user_id, so _saveRecord skips sysCreatedBy →
# getFile/createFileData RBAC then breaks (None != self.userId).
uid = self._effective_user_id()
if uid:
fileItem = fileItem.model_copy(update={"sysCreatedBy": str(uid)})
# Store in database # Store in database
self.db.recordCreate(FileItem, fileItem) self.db.recordCreate(FileItem, fileItem)
verify = self.db.getRecordset(FileItem, recordFilter={"id": fileItem.id})
verify_creator = (verify[0].get("sysCreatedBy") if verify else None)
logger.info(
"createFile: id=%s name=%s scope=%s model_sysCreatedBy=%r db_sysCreatedBy=%r mandateId=%r featureInstanceId=%r "
"verify_rows=%s db=%s",
fileItem.id,
uniqueName,
fileItem.scope,
getattr(fileItem, "sysCreatedBy", None),
verify_creator,
mandateId or None,
featureInstanceId if featureInstanceId else None,
len(verify) if verify else 0,
getattr(self.db, "dbDatabase", "?"),
)
return fileItem return fileItem
@ -1580,13 +1609,133 @@ class ComponentObjects:
# FileData methods - data operations # FileData methods - data operations
def _getFileItemForDataWrite(self, fileId: str) -> Optional[FileItem]:
    """Resolve FileItem for storing FileData: RBAC-aware getFile, then same-user row fallback.

    createFile() can insert a row that getFile() still hides (e.g. scope NULL vs GROUP rules,
    or connector / context edge cases). The creator must still be allowed to attach blob data.

    Resolution order:
      1. ``self.getFile(fileId)`` — the normal, RBAC-filtered path.
      2. Owner fallback — read the raw row via ``getRecordset`` and accept it
         when the effective user is the creator, patching a NULL or
         ``"system"`` ``sysCreatedBy`` in the database on the way.

    Returns the resolved ``FileItem``, or ``None`` when no user id is
    available, the row is missing, the creator does not match, a patch
    write fails, or the row cannot be turned into a ``FileItem``.
    Every failure path logs with a ``[FileData] FAIL`` prefix.
    """
    # Diagnostic breadcrumb for the whole resolution attempt.
    logger.info(
        "[FileData] resolve start fileId=%s iface_userId=%r effective_uid=%r mandateId=%r featureInstanceId=%r db=%s",
        fileId,
        self.userId,
        self._effective_user_id(),
        self.mandateId,
        self.featureInstanceId,
        getattr(self.db, "dbDatabase", "?"),
    )
    # Fast path: RBAC-aware lookup succeeded — nothing to patch.
    file = self.getFile(fileId)
    if file:
        logger.info("[FileData] getFile OK fileId=%s", fileId)
        return file
    uid = self._effective_user_id()
    if not uid:
        # Without any user id there is no owner to fall back to.
        logger.error(
            "[FileData] FAIL no user id fileId=%s userId=%r hasCurrentUser=%s",
            fileId,
            self.userId,
            self.currentUser is not None,
        )
        return None
    # Compare creators as strings; ids may arrive as UUID objects or str.
    uid_s = str(uid)
    rows = self.db.getRecordset(FileItem, recordFilter={"id": fileId})
    if not rows:
        logger.error(
            "[FileData] FAIL no FileItem row fileId=%s (createFile committed to same db? db=%s)",
            fileId,
            getattr(self.db, "dbDatabase", "?"),
        )
        return None
    row = dict(rows[0])
    creator = row.get("sysCreatedBy")
    creator_s = str(creator) if creator is not None else None
    if creator_s != uid_s:
        if not creator_s:
            # Creator column is NULL/empty: claim the row for the effective
            # user so later RBAC checks (getFile) succeed.
            try:
                self.db.recordModify(FileItem, fileId, {"sysCreatedBy": uid_s})
                row["sysCreatedBy"] = uid_s
                logger.warning(
                    "[FileData] patched NULL sysCreatedBy fileId=%s -> %s",
                    fileId,
                    uid_s,
                )
            except Exception as e:
                logger.error(
                    "[FileData] FAIL patch sysCreatedBy fileId=%s: %s",
                    fileId,
                    e,
                    exc_info=True,
                )
                return None
        else:
            # _saveRecord used to overwrite explicit creators with contextvar "system"
            if creator_s == "system":
                # Legacy rows created under the "system" contextvar: reassign
                # them to the real business user.
                try:
                    self.db.recordModify(FileItem, fileId, {"sysCreatedBy": uid_s})
                    row["sysCreatedBy"] = uid_s
                    logger.warning(
                        "[FileData] patched sysCreatedBy system→user fileId=%s -> %s",
                        fileId,
                        uid_s,
                    )
                except Exception as e:
                    logger.error(
                        "[FileData] FAIL patch system sysCreatedBy fileId=%s: %s",
                        fileId,
                        e,
                        exc_info=True,
                    )
                    return None
            else:
                # Row belongs to a different real user: deny the fallback.
                logger.error(
                    "[FileData] FAIL creator mismatch fileId=%s row.sysCreatedBy=%r (%s) effective_uid=%r (%s) scope=%r",
                    fileId,
                    creator,
                    type(creator).__name__,
                    uid,
                    type(uid).__name__,
                    row.get("scope"),
                )
                return None
    logger.info(
        "[FileData] RBAC miss, owner fallback OK fileId=%s scope=%r sysCreatedBy=%r",
        fileId,
        row.get("scope"),
        row.get("sysCreatedBy"),
    )
    try:
        # Fill required-but-missing columns so FileItem validation passes
        # (presumably these mirror the model's defaults — TODO confirm).
        if row.get("sysCreatedAt") is None or row.get("sysCreatedAt") in (0, 0.0):
            row["sysCreatedAt"] = getUtcTimestamp()
        if row.get("scope") is None:
            row["scope"] = "personal"
        if row.get("neutralize") is None:
            row["neutralize"] = False
        return FileItem(**row)
    except Exception as e:
        logger.error(
            "[FileData] FAIL FileItem(**row) fileId=%s keys=%s err=%s",
            fileId,
            list(row.keys()),
            e,
            exc_info=True,
        )
        return None
def createFileData(self, fileId: str, data: bytes) -> bool: def createFileData(self, fileId: str, data: bytes) -> bool:
"""Stores the binary data of a file in the database.""" """Stores the binary data of a file in the database."""
try: try:
logger.info(
"[FileData] createFileData enter fileId=%s bytes=%s",
fileId,
len(data) if data is not None else 0,
)
# Check file access # Check file access
file = self.getFile(fileId) file = self._getFileItemForDataWrite(fileId)
if not file: if not file:
logger.error(f"File with ID {fileId} not found when storing data") logger.error(
"[FileData] FAIL _getFileItemForDataWrite returned None fileId=%s",
fileId,
)
return False return False
# Determine if this is a text-based format # Determine if this is a text-based format
@ -1631,12 +1780,10 @@ class ComponentObjects:
self.db.recordCreate(FileData, fileDataObj) self.db.recordCreate(FileData, fileDataObj)
# Clear cache to ensure fresh data logger.info("[FileData] recordCreate OK fileId=%s base64Encoded=%s", fileId, base64Encoded)
logger.debug(f"Successfully stored data for file {fileId} (base64Encoded: {base64Encoded})")
return True return True
except Exception as e: except Exception as e:
logger.error(f"Error storing data for file {fileId}: {str(e)}") logger.error("Error storing data for file %s: %s", fileId, e, exc_info=True)
return False return False
def getFileData(self, fileId: str) -> Optional[bytes]: def getFileData(self, fileId: str) -> Optional[bytes]:

View file

@ -747,6 +747,7 @@ def buildFilesScopeWhereClause(
Only own files: sysCreatedBy = currentUser Only own files: sysCreatedBy = currentUser
WITH instance context (Instanz-Seiten): WITH instance context (Instanz-Seiten):
- scope = 'personal' AND sysCreatedBy = me (creator's personal files; e.g. workflow outputs)
- sysCreatedBy = me AND featureInstanceId = X (own personal files of this instance) - sysCreatedBy = me AND featureInstanceId = X (own personal files of this instance)
- scope = 'featureInstance' AND featureInstanceId = X - scope = 'featureInstance' AND featureInstanceId = X
- scope = 'mandate' AND mandateId = M (M = mandate of the instance) - scope = 'mandate' AND mandateId = M (M = mandate of the instance)
@ -780,6 +781,15 @@ def buildFilesScopeWhereClause(
scopeParts: List[str] = [] scopeParts: List[str] = []
scopeValues: List = [] scopeValues: List = []
# Personal files created by this user must remain visible even when the request
# carries mandate/instance context (GROUP reads use this clause). Otherwise
# createFile → createFileData → getFile fails and workflow outputs vanish from /files.
# Also treat scope IS NULL as legacy/personal for the owner (column default not applied).
scopeParts.append(
'(("scope" = \'personal\' OR "scope" IS NULL) AND "sysCreatedBy" = %s)'
)
scopeValues.append(currentUser.id)
if featureInstanceId: if featureInstanceId:
# 1) Own personal files of this specific instance # 1) Own personal files of this specific instance
scopeParts.append('("sysCreatedBy" = %s AND "featureInstanceId" = %s)') scopeParts.append('("sysCreatedBy" = %s AND "featureInstanceId" = %s)')

View file

@ -142,6 +142,8 @@ class AiCallLooper:
MAX_MERGE_FAILS = 3 MAX_MERGE_FAILS = 3
mergeFailCount = 0 # Global counter for merge failures across entire loop mergeFailCount = 0 # Global counter for merge failures across entire loop
lastValidCompletePart = None # Store last successfully parsed completePart for fallback lastValidCompletePart = None # Store last successfully parsed completePart for fallback
MAX_CONSECUTIVE_EMPTY_RESPONSES = 3
consecutive_empty_responses = 0
# Get parent operation ID for iteration operations (parentId should be operationId, not log entry ID) # Get parent operation ID for iteration operations (parentId should be operationId, not log entry ID)
parentOperationId = operationId # Use the parent's operationId directly parentOperationId = operationId # Use the parent's operationId directly
@ -284,8 +286,26 @@ class AiCallLooper:
break break
if not result or not result.strip(): if not result or not result.strip():
logger.warning(f"Iteration {iteration}: Empty response, stopping") consecutive_empty_responses += 1
break logger.warning(
"Iteration %s: Empty AI response (consecutive %s/%s) modelName=%s errorCount=%s",
iteration,
consecutive_empty_responses,
MAX_CONSECUTIVE_EMPTY_RESPONSES,
getattr(response, "modelName", None),
getattr(response, "errorCount", None),
)
if iterationOperationId:
self.services.chat.progressLogFinish(iterationOperationId, False)
if consecutive_empty_responses >= MAX_CONSECUTIVE_EMPTY_RESPONSES:
logger.error(
"Stopping loop: %s consecutive empty responses from model",
consecutive_empty_responses,
)
break
continue
consecutive_empty_responses = 0
# Check if this is a text response (not document generation) # Check if this is a text response (not document generation)
# Text responses don't need JSON parsing - return immediately after first successful response # Text responses don't need JSON parsing - return immediately after first successful response
@ -535,7 +555,12 @@ class AiCallLooper:
# This code path should never be reached because all registered use cases # This code path should never be reached because all registered use cases
# return early when JSON is complete. This would only execute for use cases that # return early when JSON is complete. This would only execute for use cases that
# require section extraction, but no such use cases are currently registered. # require section extraction, but no such use cases are currently registered.
logger.error(f"Unexpected code path: reached end of loop without return for use case '{useCaseId}'") logger.error(
"End of callAiWithLooping without success for use case %r (iterations=%s, lastResultLen=%s)",
useCaseId,
iteration,
len(result) if isinstance(result, str) else 0,
)
return result if result else "" return result if result else ""
def _isJsonStringIncomplete(self, jsonString: str) -> bool: def _isJsonStringIncomplete(self, jsonString: str) -> bool:

View file

@ -90,8 +90,7 @@ class StructureGenerator:
) )
try: try:
# Baue Chapter-Struktur-Prompt mit Content-Index structurePrompt, templateStructure = self._buildChapterStructurePrompt(
structurePrompt = self._buildChapterStructurePrompt(
userPrompt=userPrompt, userPrompt=userPrompt,
contentParts=contentParts, contentParts=contentParts,
outputFormat=outputFormat outputFormat=outputFormat
@ -108,12 +107,6 @@ class StructureGenerator:
resultFormat="json" resultFormat="json"
) )
structurePrompt, templateStructure = self._buildChapterStructurePrompt(
userPrompt=userPrompt,
contentParts=contentParts,
outputFormat=outputFormat
)
# Create prompt builder for continuation support # Create prompt builder for continuation support
async def buildChapterStructurePromptWithContinuation( async def buildChapterStructurePromptWithContinuation(
continuationContext: Any, continuationContext: Any,
@ -196,6 +189,13 @@ CRITICAL:
contentParts=None # Do not pass ContentParts - only metadata needed, not content extraction contentParts=None # Do not pass ContentParts - only metadata needed, not content extraction
) )
if not isinstance(aiResponseJson, str) or not aiResponseJson.strip():
raise ValueError(
"Structure generation returned no JSON text from the model (empty response after retries). "
"Check the AI provider, allowed models, billing, and debug artifact "
"'chapter_structure_generation_response'."
)
# Parse the complete JSON response (looping system already handles completion) # Parse the complete JSON response (looping system already handles completion)
extractedJson = self.services.utils.jsonExtractString(aiResponseJson) extractedJson = self.services.utils.jsonExtractString(aiResponseJson)
parsedJson, parseError, cleanedJson = self.services.utils.jsonTryParse(extractedJson) parsedJson, parseError, cleanedJson = self.services.utils.jsonTryParse(extractedJson)
@ -215,7 +215,12 @@ CRITICAL:
raise ValueError(f"Failed to parse JSON structure after repair: {str(parseError)}") raise ValueError(f"Failed to parse JSON structure after repair: {str(parseError)}")
else: else:
logger.error(f"Failed to repair JSON. Parse error: {str(parseError)}") logger.error(f"Failed to repair JSON. Parse error: {str(parseError)}")
logger.error(f"Cleaned JSON preview (first 500 chars): {cleanedJson[:500]}") raw_preview = (extractedJson or "")[:500]
logger.error(
"Raw extract preview (first 500 chars): %r",
raw_preview,
)
logger.error(f"Cleaned JSON preview (first 500 chars): {cleanedJson[:500]!r}")
raise ValueError(f"Failed to parse JSON structure: {str(parseError)}") raise ValueError(f"Failed to parse JSON structure: {str(parseError)}")
else: else:
structure = parsedJson structure = parsedJson

View file

@ -23,7 +23,11 @@ class ChatService:
from modules.interfaces.interfaceDbManagement import getInterface as getComponentInterface from modules.interfaces.interfaceDbManagement import getInterface as getComponentInterface
from modules.interfaces.interfaceDbChat import getInterface as getChatInterface from modules.interfaces.interfaceDbChat import getInterface as getChatInterface
self.interfaceDbApp = getAppInterface(context.user, mandateId=context.mandate_id) self.interfaceDbApp = getAppInterface(context.user, mandateId=context.mandate_id)
self.interfaceDbComponent = getComponentInterface(context.user, mandateId=context.mandate_id) self.interfaceDbComponent = getComponentInterface(
context.user,
mandateId=context.mandate_id,
featureInstanceId=context.feature_instance_id,
)
self.interfaceDbChat = getChatInterface( self.interfaceDbChat = getChatInterface(
context.user, context.user,
mandateId=context.mandate_id, mandateId=context.mandate_id,

View file

@ -79,7 +79,15 @@ class RendererCodeCsv(BaseCodeRenderer):
return renderedDocs return renderedDocs
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None, *, style: Dict[str, Any] = None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
""" """
Render method for document generation compatibility. Render method for document generation compatibility.
Delegates to document renderer if needed, or handles code files directly. Delegates to document renderer if needed, or handles code files directly.
@ -94,7 +102,7 @@ class RendererCodeCsv(BaseCodeRenderer):
# Document generation path - delegate to document renderer # Document generation path - delegate to document renderer
from .rendererCsv import RendererCsv from .rendererCsv import RendererCsv
documentRenderer = RendererCsv(self.services) documentRenderer = RendererCsv(self.services)
return await documentRenderer.render(extractedContent, title, userPrompt, aiService) return await documentRenderer.render(extractedContent, title, userPrompt, aiService, style=style)
def _validateAndFixCsv(self, content: str) -> str: def _validateAndFixCsv(self, content: str) -> str:
"""Validate CSV structure and fix common issues.""" """Validate CSV structure and fix common issues."""

View file

@ -91,7 +91,15 @@ class RendererCodeJson(BaseCodeRenderer):
return renderedDocs return renderedDocs
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None, *, style: Dict[str, Any] = None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
""" """
Render method for document generation compatibility. Render method for document generation compatibility.
Delegates to document renderer if needed, or handles code files directly. Delegates to document renderer if needed, or handles code files directly.
@ -107,7 +115,7 @@ class RendererCodeJson(BaseCodeRenderer):
# Import here to avoid circular dependency # Import here to avoid circular dependency
from .rendererJson import RendererJson from .rendererJson import RendererJson
documentRenderer = RendererJson(self.services) documentRenderer = RendererJson(self.services)
return await documentRenderer.render(extractedContent, title, userPrompt, aiService) return await documentRenderer.render(extractedContent, title, userPrompt, aiService, style=style)
def _extractJsonStatistics(self, parsed: Any) -> Dict[str, Any]: def _extractJsonStatistics(self, parsed: Any) -> Dict[str, Any]:
"""Extract JSON statistics for validation (object count, array count, key count).""" """Extract JSON statistics for validation (object count, array count, key count)."""

View file

@ -78,11 +78,20 @@ class RendererCodeXml(BaseCodeRenderer):
return renderedDocs return renderedDocs
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None, *, style: Dict[str, Any] = None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
""" """
Render method for document generation compatibility. Render method for document generation compatibility.
For XML, we only support code generation (no document renderer exists yet). For XML, we only support code generation (no document renderer exists yet).
""" """
_ = style
# Check if this is code generation (has files array) # Check if this is code generation (has files array)
if "files" in extractedContent: if "files" in extractedContent:
# Code generation path - use renderCodeFiles # Code generation path - use renderCodeFiles

View file

@ -39,8 +39,17 @@ class RendererCsv(BaseRenderer):
""" """
return ["table", "code_block"] return ["table", "code_block"]
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None, *, style: Dict[str, Any] = None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
"""Render extracted JSON content to CSV format. Produces one CSV file per table section.""" """Render extracted JSON content to CSV format. Produces one CSV file per table section."""
_ = style
try: try:
# Validate JSON structure # Validate JSON structure
if not self._validateJsonStructure(extractedContent): if not self._validateJsonStructure(extractedContent):

View file

@ -43,8 +43,17 @@ class RendererImage(BaseRenderer):
""" """
return ["image"] return ["image"]
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None, *, style: Dict[str, Any] = None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
"""Render extracted JSON content to image format using AI image generation.""" """Render extracted JSON content to image format using AI image generation."""
_ = style
try: try:
# Generate AI image from content # Generate AI image from content
imageContent = await self._generateAiImage(extractedContent, title, userPrompt, aiService) imageContent = await self._generateAiImage(extractedContent, title, userPrompt, aiService)

View file

@ -42,8 +42,17 @@ class RendererJson(BaseRenderer):
# Return all types except image # Return all types except image
return [st for st in supportedSectionTypes if st != "image"] return [st for st in supportedSectionTypes if st != "image"]
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None, *, style: Dict[str, Any] = None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
"""Render extracted JSON content to JSON format.""" """Render extracted JSON content to JSON format."""
_ = style
try: try:
# The extracted content should already be JSON from the AI # The extracted content should already be JSON from the AI
# Just validate and format it # Just validate and format it

View file

@ -40,8 +40,17 @@ class RendererMarkdown(BaseRenderer):
from modules.datamodels.datamodelJson import supportedSectionTypes from modules.datamodels.datamodelJson import supportedSectionTypes
return [st for st in supportedSectionTypes if st != "image"] return [st for st in supportedSectionTypes if st != "image"]
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None, *, style: Dict[str, Any] = None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
"""Render extracted JSON content to Markdown format.""" """Render extracted JSON content to Markdown format."""
_ = style
try: try:
# Generate markdown from JSON structure # Generate markdown from JSON structure
markdownContent = self._generateMarkdownFromJson(extractedContent, title) markdownContent = self._generateMarkdownFromJson(extractedContent, title)

View file

@ -8,7 +8,7 @@ import re
from .documentRendererBaseTemplate import BaseRenderer from .documentRendererBaseTemplate import BaseRenderer
from modules.datamodels.datamodelDocument import RenderedDocument from modules.datamodels.datamodelDocument import RenderedDocument
from typing import Dict, Any, List, Optional from typing import Dict, Any, List, Optional, Union
class RendererText(BaseRenderer): class RendererText(BaseRenderer):
"""Renders content to plain text format with format-specific extraction.""" """Renders content to plain text format with format-specific extraction."""
@ -76,8 +76,17 @@ class RendererText(BaseRenderer):
# Text renderer accepts all types except images # Text renderer accepts all types except images
return [st for st in supportedSectionTypes if st != "image"] return [st for st in supportedSectionTypes if st != "image"]
async def render(self, extractedContent: Dict[str, Any], title: str, userPrompt: str = None, aiService=None, *, style: Dict[str, Any] = None) -> List[RenderedDocument]: async def render(
self,
extractedContent: Dict[str, Any],
title: str,
userPrompt: str = None,
aiService=None,
*,
style: Dict[str, Any] = None,
) -> List[RenderedDocument]:
"""Render extracted JSON content to plain text format.""" """Render extracted JSON content to plain text format."""
_ = style # unified style from renderReport; plain text ignores formatting hints
try: try:
# Generate text from JSON structure # Generate text from JSON structure
textContent = self._generateTextFromJson(extractedContent, title) textContent = self._generateTextFromJson(extractedContent, title)
@ -263,16 +272,16 @@ class RendererText(BaseRenderer):
textParts = [] textParts = []
# Create table header # Create table header
headerLine = " | ".join(str(header) for header in headers) headerLine = " | ".join(self._tableCellToPlainText(h) for h in headers)
textParts.append(headerLine) textParts.append(headerLine)
# Add separator line # Add separator line
separatorLine = " | ".join("-" * len(str(header)) for header in headers) separatorLine = " | ".join("-" * len(self._tableCellToPlainText(h)) for h in headers)
textParts.append(separatorLine) textParts.append(separatorLine)
# Add data rows # Add data rows
for row in rows: for row in rows:
rowLine = " | ".join(str(cellData) for cellData in row) rowLine = " | ".join(self._tableCellToPlainText(cellData) for cellData in row)
textParts.append(rowLine) textParts.append(rowLine)
return '\n'.join(textParts) return '\n'.join(textParts)
@ -299,6 +308,9 @@ class RendererText(BaseRenderer):
textParts.append(f"- {self._stripMarkdownForPlainText(item)}") textParts.append(f"- {self._stripMarkdownForPlainText(item)}")
elif isinstance(item, dict) and "text" in item: elif isinstance(item, dict) and "text" in item:
textParts.append(f"- {self._stripMarkdownForPlainText(item['text'])}") textParts.append(f"- {self._stripMarkdownForPlainText(item['text'])}")
elif isinstance(item, list):
# markdownToDocumentJson: each item is List[InlineRun]
textParts.append(f"- {self._inlineRunsToPlainText(item)}")
return '\n'.join(textParts) return '\n'.join(textParts)
@ -345,12 +357,54 @@ class RendererText(BaseRenderer):
text = re.sub(r'`([^`]+)`', r'\1', text) text = re.sub(r'`([^`]+)`', r'\1', text)
return text.strip() return text.strip()
def _inlineRunsToPlainText(self, runs: Union[List[Any], Any]) -> str:
"""Flatten InlineRun dicts (from markdownToDocumentJson) to a single string."""
if runs is None:
return ""
if isinstance(runs, dict):
runs = [runs]
if not isinstance(runs, list):
return self._stripMarkdownForPlainText(str(runs))
parts: List[str] = []
for run in runs:
if not isinstance(run, dict):
parts.append(str(run))
continue
t = run.get("type") or "text"
val = run.get("value", "")
if t == "text":
parts.append(str(val))
elif t in ("bold", "italic", "code"):
parts.append(str(val))
elif t == "link":
parts.append(str(val))
elif t == "image":
parts.append(f"[{val}]")
else:
parts.append(str(val))
return "".join(parts)
def _tableCellToPlainText(self, cell: Any) -> str:
    """Normalize one table header/cell to plain text.

    Supported shapes: ``None`` (empty), plain ``str``, a legacy dict with a
    ``"text"`` key, a ``List[InlineRun]``, or any other value (stringified).
    All textual paths run through ``_stripMarkdownForPlainText``.
    """
    if cell is None:
        return ""
    # InlineRun lists get flattened by the dedicated helper, untouched.
    if isinstance(cell, list):
        return self._inlineRunsToPlainText(cell)
    # Reduce the remaining shapes to a single string, then strip markdown.
    if isinstance(cell, dict) and "text" in cell:
        cell = str(cell["text"])
    elif not isinstance(cell, str):
        cell = str(cell)
    return self._stripMarkdownForPlainText(cell)
def _renderJsonParagraph(self, paragraphData: Dict[str, Any]) -> str: def _renderJsonParagraph(self, paragraphData: Dict[str, Any]) -> str:
"""Render a JSON paragraph to text. Strips markdown for plain text output.""" """Render a JSON paragraph to text. Strips markdown for plain text output."""
try: try:
# Extract from nested content structure # Extract from nested content structure
content = paragraphData.get("content", {}) content = paragraphData.get("content", {})
if isinstance(content, dict): if isinstance(content, dict):
runs = self._inlineRunsFromContent(content)
if runs:
return self._stripMarkdownForPlainText(self._inlineRunsToPlainText(runs))
text = content.get("text", "") text = content.get("text", "")
elif isinstance(content, str): elif isinstance(content, str):
text = content text = content

View file

@ -326,11 +326,25 @@ class ActionNodeExecutor:
if isinstance(dumped, dict) and isinstance(rawData, bytes) and len(rawData) > 0: if isinstance(dumped, dict) and isinstance(rawData, bytes) and len(rawData) > 0:
try: try:
from modules.interfaces.interfaceDbManagement import getInterface as _getMgmtInterface from modules.interfaces.interfaceDbManagement import getInterface as _getMgmtInterface
from modules.interfaces.interfaceDbApp import getInterface as _getAppInterface
from modules.security.rootAccess import getRootUser from modules.security.rootAccess import getRootUser
_userId = context.get("userId") _userId = context.get("userId")
_mandateId = context.get("mandateId") _mandateId = context.get("mandateId")
_instanceId = context.get("instanceId") _instanceId = context.get("instanceId")
_mgmt = _getMgmtInterface(getRootUser(), mandateId=_mandateId, featureInstanceId=_instanceId) _owner = None
if _userId:
try:
_umap = _getAppInterface(getRootUser()).getUsersByIds([str(_userId)])
_owner = _umap.get(str(_userId))
except Exception as _ue:
logger.warning("Could not resolve workflow user for file persistence: %s", _ue)
if _owner is None:
_owner = getRootUser()
logger.debug(
"Persisting workflow document as root user (no resolved owner userId=%r)",
_userId,
)
_mgmt = _getMgmtInterface(_owner, mandateId=_mandateId, featureInstanceId=_instanceId)
_docName = dumped.get("documentName") or f"workflow-result-{nodeId}.bin" _docName = dumped.get("documentName") or f"workflow-result-{nodeId}.bin"
_mimeType = dumped.get("mimeType") or "application/octet-stream" _mimeType = dumped.get("mimeType") or "application/octet-stream"
_fileItem = _mgmt.createFile(_docName, _mimeType, rawData) _fileItem = _mgmt.createFile(_docName, _mimeType, rawData)
@ -345,6 +359,20 @@ class ActionNodeExecutor:
dumped["_hasBinaryData"] = True dumped["_hasBinaryData"] = True
docsList.append(dumped) docsList.append(dumped)
# Clean DocumentList shape for document nodes (match file.create: documents + count, no AiResult fields)
if outputSchema == "DocumentList" and nodeType in ("ai.generateDocument", "ai.convertDocument"):
if not result.success:
return _normalizeError(
RuntimeError(str(result.error or "document action failed")),
outputSchema,
)
list_out: Dict[str, Any] = {
"documents": docsList,
"count": len(docsList),
}
_attachConnectionProvenance(list_out, resolvedParams, outputSchema, chatService, self.services)
return normalizeToSchema(list_out, outputSchema)
extractedContext = "" extractedContext = ""
if result.documents: if result.documents:
doc = result.documents[0] doc = result.documents[0]

View file

@ -7,6 +7,50 @@ from typing import Dict, List, Any, Tuple, Set, Optional
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
def _ai_result_text_from_documents(d: Dict[str, Any]) -> Optional[str]:
"""Extract plain-text body from AiResult-style ``documents[0].documentData``."""
docs = d.get("documents")
if not isinstance(docs, list) or not docs:
return None
d0 = docs[0]
raw: Any = None
if isinstance(d0, dict):
raw = d0.get("documentData")
elif d0 is not None:
raw = getattr(d0, "documentData", None)
if raw is None:
return None
if isinstance(raw, bytes):
try:
t = raw.decode("utf-8").strip()
return t or None
except (UnicodeDecodeError, ValueError):
return None
if isinstance(raw, str):
s = raw.strip()
return s or None
return None
def _ref_coalesce_empty_ai_result_text(data: Any, path: List[Any], resolved: Any) -> Any:
    """If a ref targets AiResult text fields but resolves empty/missing, fall back to documents.

    Needed when: optional ``responseData`` is absent (no synthetic ``{}``),
    ``response`` is still empty but ``documents`` hold the model output, or
    legacy graphs bind responseData only.
    """
    # Anything non-empty resolved normally — keep it.
    if resolved is not None and resolved != "":
        return resolved
    # Fallback only applies to dict payloads with a known text-field path.
    if not isinstance(data, dict) or not path:
        return resolved
    first = path[0]
    if first not in ("response", "responseData", "context"):
        return resolved
    # "context" only qualifies as a bare, single-segment path.
    if first == "context" and len(path) != 1:
        return resolved
    fallback = _ai_result_text_from_documents(data)
    return resolved if fallback is None else fallback
def parseGraph(graph: Dict[str, Any]) -> Tuple[List[Dict], List[Dict], Set[str]]: def parseGraph(graph: Dict[str, Any]) -> Tuple[List[Dict], List[Dict], Set[str]]:
""" """
Parse graph into nodes, connections, and node IDs. Parse graph into nodes, connections, and node IDs.
@ -356,14 +400,15 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
data = data.get("data", data) data = data.get("data", data)
plist = list(path) plist = list(path)
resolved = _get_by_path(data, plist) resolved = _get_by_path(data, plist)
if ( if resolved is None and isinstance(data, dict) and plist:
resolved is None if plist[0] == "payload" and len(plist) > 1:
and isinstance(data, dict) # Strip explicit "payload" prefix (legacy DataPicker paths)
and plist resolved = _get_by_path(data, plist[1:])
and plist[0] == "payload" elif "payload" in data and isinstance(data["payload"], dict):
and len(plist) > 1 # Form nodes store fields under {"payload": {fieldName: …}}.
): # DataPicker emits bare field paths like ["url"]; try under payload.
resolved = _get_by_path(data, plist[1:]) resolved = _get_by_path(data["payload"], plist)
resolved = _ref_coalesce_empty_ai_result_text(data, plist, resolved)
return resolveParameterReferences(resolved, nodeOutputs) return resolveParameterReferences(resolved, nodeOutputs)
return value return value
if value.get("type") == "value": if value.get("type") == "value":
@ -386,16 +431,27 @@ def resolveParameterReferences(value: Any, nodeOutputs: Dict[str, Any]) -> Any:
if len(parts) < 2: if len(parts) < 2:
return json.dumps(data) if isinstance(data, (dict, list)) else str(data) return json.dumps(data) if isinstance(data, (dict, list)) else str(data)
rest = ".".join(parts[1:]) rest = ".".join(parts[1:])
if data is None:
def _walk(root, keys):
cur = root
for k in keys:
if isinstance(cur, dict) and k in cur:
cur = cur[k]
elif isinstance(cur, (list, tuple)) and k.isdigit():
cur = cur[int(k)]
else:
return None
return cur
keys = rest.split(".")
result = _walk(data, keys)
# Form nodes store fields under {"payload": {field: …}}.
# Fall back to looking under "payload" when the direct path misses.
if result is None and isinstance(data, dict) and "payload" in data:
result = _walk(data["payload"], keys)
if result is None:
return m.group(0) return m.group(0)
for k in rest.split("."): return str(result) if not isinstance(result, (dict, list)) else json.dumps(result, ensure_ascii=False)
if isinstance(data, dict) and k in data:
data = data[k]
elif isinstance(data, (list, tuple)) and k.isdigit():
data = data[int(k)]
else:
return m.group(0)
return str(data) if data is not None else m.group(0)
return re.sub(r"\{\{\s*([^}]+)\s*\}\}", repl, value) return re.sub(r"\{\{\s*([^}]+)\s*\}\}", repl, value)
if isinstance(value, list): if isinstance(value, list):
# contextBuilder: list where every item is a `{"type":"ref",...}` envelope. # contextBuilder: list where every item is a `{"type":"ref",...}` envelope.

View file

@ -11,12 +11,15 @@ def serialize_context(val: Any) -> str:
"""Convert any context value to a readable string for use in AI prompts. """Convert any context value to a readable string for use in AI prompts.
- None / empty string "" - None / empty string ""
- empty dict (no keys) "" (avoids literal "{}" in file.create / prompts)
- str as-is - str as-is
- dict / list pretty-printed JSON - dict / list pretty-printed JSON
- anything else str() - anything else str()
""" """
if val is None or val == "" or val == []: if val is None or val == "" or val == []:
return "" return ""
if isinstance(val, dict) and len(val) == 0:
return ""
if isinstance(val, str): if isinstance(val, str):
return val.strip() return val.strip()
try: try:

View file

@ -23,8 +23,10 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
documentList = parameters.get("documentList", []) documentList = parameters.get("documentList", [])
documentType = parameters.get("documentType") documentType = parameters.get("documentType")
# Optional: if omitted, formats determined from prompt by AI # Prefer explicit outputFormat (flow UI); resultType remains for legacy / API callers.
resultType = parameters.get("resultType") resultType = parameters.get("outputFormat") or parameters.get("resultType")
if isinstance(resultType, str):
resultType = resultType.strip().lstrip(".").lower() or None
if not resultType: if not resultType:
logger.debug("resultType not provided - formats will be determined from prompt by AI") logger.debug("resultType not provided - formats will be determined from prompt by AI")
@ -49,8 +51,12 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
else: else:
docRefList = DocumentReferenceList(references=[]) docRefList = DocumentReferenceList(references=[])
# Prepare title title_raw = parameters.get("title")
title = parameters.get("documentType") or "Generated Document" title = (title_raw.strip() if isinstance(title_raw, str) else "") or None
if not title and isinstance(documentType, str) and documentType.strip():
title = documentType.strip()
if not title:
title = "Generated Document"
# Call AI service for document generation # Call AI service for document generation
# callAiContent handles documentList internally via Phases 5A-5E # callAiContent handles documentList internally via Phases 5A-5E
@ -98,6 +104,8 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
"actionType": "ai.generateDocument", "actionType": "ai.generateDocument",
"documentType": documentType, "documentType": documentType,
"resultType": resultType, "resultType": resultType,
"outputFormat": resultType,
"title": title,
} }
)) ))
@ -119,14 +127,15 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
docName = sanitized docName = sanitized
# Determine mime type # Determine mime type
rt = resultTypeFallback
mimeType = "text/plain" mimeType = "text/plain"
if resultType == "html": if rt == "html":
mimeType = "text/html" mimeType = "text/html"
elif resultType == "json": elif rt == "json":
mimeType = "application/json" mimeType = "application/json"
elif resultType == "pdf": elif rt == "pdf":
mimeType = "application/pdf" mimeType = "application/pdf"
elif resultType == "md": elif rt == "md":
mimeType = "text/markdown" mimeType = "text/markdown"
documents.append(ActionDocument( documents.append(ActionDocument(
@ -137,6 +146,8 @@ async def generateDocument(self, parameters: Dict[str, Any]) -> ActionResult:
"actionType": "ai.generateDocument", "actionType": "ai.generateDocument",
"documentType": documentType, "documentType": documentType,
"resultType": resultType, "resultType": resultType,
"outputFormat": resultType,
"title": title,
} }
)) ))

View file

@ -289,6 +289,30 @@ class MethodAi(MethodBase):
required=True, required=True,
description="Description of the document to generate" description="Description of the document to generate"
), ),
"outputFormat": WorkflowActionParameter(
name="outputFormat",
type="str",
frontendType=FrontendType.SELECT,
frontendOptions=["docx", "pdf", "txt", "html", "md"],
required=False,
default="docx",
description="Rendered output format (same choices as file.create). If omitted alongside resultType, the model may infer format from the prompt."
),
"title": WorkflowActionParameter(
name="title",
type="str",
frontendType=FrontendType.TEXT,
required=False,
description="Document title / metadata (optional); used as generation title and for file naming hints."
),
"context": WorkflowActionParameter(
name="context",
type="Any",
frontendType=FrontendType.TEXTAREA,
required=False,
default="",
description="Additional structured or text context from upstream steps; serialized into the prompt."
),
"documentList": WorkflowActionParameter( "documentList": WorkflowActionParameter(
name="documentList", name="documentList",
type="DocumentList", type="DocumentList",
@ -302,16 +326,15 @@ class MethodAi(MethodBase):
frontendType=FrontendType.SELECT, frontendType=FrontendType.SELECT,
frontendOptions=["letter", "memo", "proposal", "contract", "report", "email"], frontendOptions=["letter", "memo", "proposal", "contract", "report", "email"],
required=False, required=False,
description="Type of document" description="Type of document (content hint for the model); used as title fallback when title is empty."
), ),
"resultType": WorkflowActionParameter( "resultType": WorkflowActionParameter(
name="resultType", name="resultType",
type="str", type="str",
frontendType=FrontendType.TEXT, frontendType=FrontendType.TEXT,
required=False, required=False,
default="txt", description="Legacy/API output format extension (e.g. txt, docx). Ignored when outputFormat is set."
description="Output format (e.g., txt, html, pdf, docx, md, json, csv, xlsx, pptx, png, jpg). Optional: if omitted, formats are determined from prompt by AI. Default \"txt\" is validation fallback only. With per-document format determination, AI can determine different formats for different documents based on prompt." ),
)
}, },
execute=generateDocument.__get__(self, self.__class__) execute=generateDocument.__get__(self, self.__class__)
), ),

View file

@ -35,6 +35,12 @@ def _persistDocumentsToUserFiles(
return return
if not mgmt: if not mgmt:
return return
logger.info(
"file.create persist: mgmt=%s id(mgmt)=%s has_createFileData=%s",
type(mgmt).__name__,
id(mgmt),
hasattr(mgmt, "createFileData"),
)
for doc in action_documents: for doc in action_documents:
try: try:
doc_data = doc.documentData if hasattr(doc, "documentData") else doc.get("documentData") doc_data = doc.documentData if hasattr(doc, "documentData") else doc.get("documentData")
@ -54,8 +60,15 @@ def _persistDocumentsToUserFiles(
or doc.get("mimeType") or doc.get("mimeType")
or "application/octet-stream" or "application/octet-stream"
) )
logger.info(
"file.create persist: calling createFile name=%s bytes=%s",
doc_name,
len(content),
)
file_item = mgmt.createFile(doc_name, mime, content) file_item = mgmt.createFile(doc_name, mime, content)
mgmt.createFileData(file_item.id, content) logger.info("file.create persist: createFile returned id=%s", file_item.id)
ok = mgmt.createFileData(file_item.id, content)
logger.info("file.create persist: createFileData returned %s for id=%s", ok, file_item.id)
meta = getattr(doc, "validationMetadata", None) or doc.get("validationMetadata") or {} meta = getattr(doc, "validationMetadata", None) or doc.get("validationMetadata") or {}
if isinstance(meta, dict): if isinstance(meta, dict):
meta["fileId"] = file_item.id meta["fileId"] = file_item.id
@ -79,6 +92,10 @@ async def create(self, parameters: Dict[str, Any]) -> ActionResult:
context = serialize_context(raw_context) context = serialize_context(raw_context)
if not context: if not context:
logger.warning(
"file.create: context empty after resolve — check DataRefs (e.g. Antworttext / "
"documents[0].documentData from the AI step)."
)
return ActionResult.isFailure(error="context is required (connect an AI node or provide text)") return ActionResult.isFailure(error="context is required (connect an AI node or provide text)")
outputFormat = (parameters.get("outputFormat") or "docx").strip().lower().lstrip(".") outputFormat = (parameters.get("outputFormat") or "docx").strip().lower().lstrip(".")