from collections.abc import Mapping, Sequence
from datetime import datetime
from enum import StrEnum
from typing import Any

from pydantic import Field

from dify_graph.entities import ToolCall, ToolResult
from dify_graph.entities.pause_reason import PauseReason
from dify_graph.file import File
from dify_graph.model_runtime.entities.llm_entities import LLMUsage
from dify_graph.node_events import NodeRunResult

from .base import NodeEventBase


class RunRetrieverResourceEvent(NodeEventBase):
    """Event carrying knowledge-retrieval output for a node run.

    Bundles the raw retriever resources together with the assembled context
    string (and, optionally, any context files) produced by retrieval.
    """

    retriever_resources: Sequence[Mapping[str, Any]] = Field(..., description="retriever resources")
    context: str = Field(..., description="context")
    context_files: list[File] | None = Field(default=None, description="context files")


class ModelInvokeCompletedEvent(NodeEventBase):
    """Event emitted when an LLM invocation has completed.

    Carries the final generated text, token usage, and optional
    finish reason, reasoning content, and structured output.
    """

    text: str
    usage: LLMUsage
    finish_reason: str | None = None
    # Reasoning/chain-of-thought text, when the model exposes it separately.
    reasoning_content: str | None = None
    # Parsed structured output, when structured output mode is enabled.
    structured_output: dict | None = None


class RunRetryEvent(NodeEventBase):
    """Event emitted when a node run is being retried after an error."""

    error: str = Field(..., description="error")
    retry_index: int = Field(..., description="Retry attempt number")
    start_at: datetime = Field(..., description="Retry start time")


class ChunkType(StrEnum):
    """Stream chunk type for LLM-related events."""

    TEXT = "text"  # Normal text streaming
    TOOL_CALL = "tool_call"  # Tool call arguments streaming
    TOOL_RESULT = "tool_result"  # Tool execution result
    THOUGHT = "thought"  # Agent thinking process (ReAct)
    THOUGHT_START = "thought_start"  # Agent thought start
    THOUGHT_END = "thought_end"  # Agent thought end
    MODEL_START = "model_start"  # Model turn started with identity info
    MODEL_END = "model_end"  # Model turn completed with metrics


class StreamChunkEvent(NodeEventBase):
    """Base stream chunk event - normal text streaming output.

    Subclasses below pin ``chunk_type`` (frozen) to a specific ChunkType;
    the optional payload fields here are only populated for the matching
    chunk type (e.g. ``model_usage`` for MODEL_END chunks).
    """

    selector: Sequence[str] = Field(
        ..., description="selector identifying the output location (e.g., ['nodeA', 'text'])"
    )
    chunk: str = Field(..., description="the actual chunk content")
    is_final: bool = Field(default=False, description="indicates if this is the last chunk")
    chunk_type: ChunkType = Field(default=ChunkType.TEXT, description="type of the chunk")
    tool_call: ToolCall | None = Field(default=None, description="structured payload for tool_call chunks")
    tool_result: ToolResult | None = Field(default=None, description="structured payload for tool_result chunks")
    # Model identity fields (when chunk_type == MODEL_START)
    model_provider: str | None = Field(default=None, description="model provider identifier")
    model_name: str | None = Field(default=None, description="model name")
    model_icon: str | dict | None = Field(default=None, description="model provider icon")
    model_icon_dark: str | dict | None = Field(default=None, description="model provider dark icon")
    # Model metrics fields (when chunk_type == MODEL_END)
    model_usage: LLMUsage | None = Field(default=None, description="per-turn token usage")
    model_duration: float | None = Field(default=None, description="per-turn duration in seconds")


class ToolCallChunkEvent(StreamChunkEvent):
    """Tool call streaming event - tool call arguments streaming output."""

    # frozen=True prevents callers from overriding the pinned chunk type.
    chunk_type: ChunkType = Field(default=ChunkType.TOOL_CALL, frozen=True)
    tool_call: ToolCall | None = Field(default=None, description="structured tool call payload")


class ToolResultChunkEvent(StreamChunkEvent):
    """Tool result event - tool execution result."""

    chunk_type: ChunkType = Field(default=ChunkType.TOOL_RESULT, frozen=True)
    tool_result: ToolResult | None = Field(default=None, description="structured tool result payload")


class ThoughtStartChunkEvent(StreamChunkEvent):
    """Agent thought start streaming event - Agent thinking process (ReAct)."""

    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT_START, frozen=True)


class ThoughtEndChunkEvent(StreamChunkEvent):
    """Agent thought end streaming event - Agent thinking process (ReAct)."""

    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT_END, frozen=True)


class ThoughtChunkEvent(StreamChunkEvent):
    """Agent thought streaming event - Agent thinking process (ReAct)."""

    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT, frozen=True)


class StreamCompletedEvent(NodeEventBase):
    """Event emitted when a node's streaming output has completed."""

    node_run_result: NodeRunResult = Field(..., description="run result")


class PauseRequestedEvent(NodeEventBase):
    """Event requesting that execution be paused for the given reason."""

    reason: PauseReason = Field(..., description="pause reason")


class HumanInputFormFilledEvent(NodeEventBase):
    """Event emitted when a human input form is submitted."""

    node_title: str
    # Form content after rendering — presumably with user values substituted;
    # NOTE(review): confirm against the emitter.
    rendered_content: str
    # Identifier and display text of the action the user selected.
    action_id: str
    action_text: str


class HumanInputFormTimeoutEvent(NodeEventBase):
    """Event emitted when a human input form times out."""

    node_title: str
    # The deadline that elapsed, causing the timeout.
    expiration_time: datetime