From 2fb391a64258bfa300f98ed8353d81bb6bdfd883 Mon Sep 17 00:00:00 2001
From: Novice
Date: Tue, 27 Jan 2026 15:28:04 +0800
Subject: [PATCH] fix: generation stream abort

---
 api/core/workflow/nodes/llm/node.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py
index d2d7c8d425..6ab0e390ed 100644
--- a/api/core/workflow/nodes/llm/node.py
+++ b/api/core/workflow/nodes/llm/node.py
@@ -421,8 +421,9 @@ class LLMNode(Node[LLMNodeData]):
             outputs["structured_output"] = structured_output.structured_output
 
         # Send final chunk event to indicate streaming is complete
-        if not self.tool_call_enabled:
-            # For tool calls and sandbox, final events are already sent in _process_tool_outputs
+        # For tool calls and sandbox, final events are already sent in _process_tool_outputs
+        sandbox_used = sandbox and has_skill_prompt
+        if not self.tool_call_enabled and not sandbox_used:
             yield StreamChunkEvent(
                 selector=[self._node_id, "text"],
                 chunk="",
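
Note (not part of the patch): the sketch below is a minimal, standalone illustration of the guard this change adds. Per the diff, the final empty chunk is now skipped not only when tool calls are enabled but also when the sandbox/skill path was used, since that path already sends its own final events; emitting another closing chunk could, per the commit title, abort the generation stream. The StreamChunkEvent shape, the finalize_stream helper, and the sandbox/has_skill_prompt parameters here are simplified assumptions, not Dify's actual API.

# Minimal sketch of the guard introduced by the patch (assumed, simplified types).
from dataclasses import dataclass
from typing import Iterator


@dataclass
class StreamChunkEvent:
    selector: list[str]
    chunk: str


def finalize_stream(
    node_id: str,
    tool_call_enabled: bool,
    sandbox: object | None,
    has_skill_prompt: bool,
) -> Iterator[StreamChunkEvent]:
    # For tool calls and sandbox, final events are already sent elsewhere
    # (_process_tool_outputs in the real node), so only emit the closing
    # empty chunk when neither of those paths has already done so.
    sandbox_used = bool(sandbox) and has_skill_prompt
    if not tool_call_enabled and not sandbox_used:
        yield StreamChunkEvent(selector=[node_id, "text"], chunk="")


# Usage: with the sandbox/skill path active, no extra final chunk is emitted.
assert list(finalize_stream("node-1", tool_call_enabled=False, sandbox=object(), has_skill_prompt=True)) == []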