mirror of
https://github.com/langgenius/dify.git
synced 2026-03-25 14:01:06 -04:00
Made-with: Cursor # Conflicts: # .devcontainer/post_create_command.sh # api/commands.py # api/core/agent/cot_agent_runner.py # api/core/agent/fc_agent_runner.py # api/core/app/apps/workflow_app_runner.py # api/core/app/entities/queue_entities.py # api/core/app/entities/task_entities.py # api/core/workflow/workflow_entry.py # api/dify_graph/enums.py # api/dify_graph/graph/graph.py # api/dify_graph/graph_events/node.py # api/dify_graph/model_runtime/entities/message_entities.py # api/dify_graph/node_events/node.py # api/dify_graph/nodes/agent/agent_node.py # api/dify_graph/nodes/base/__init__.py # api/dify_graph/nodes/base/entities.py # api/dify_graph/nodes/base/node.py # api/dify_graph/nodes/llm/entities.py # api/dify_graph/nodes/llm/node.py # api/dify_graph/nodes/tool/tool_node.py # api/pyproject.toml # api/uv.lock # web/app/components/base/avatar/__tests__/index.spec.tsx # web/app/components/base/avatar/index.tsx # web/app/components/base/date-and-time-picker/time-picker/__tests__/index.spec.tsx # web/app/components/base/file-uploader/file-from-link-or-local/index.tsx # web/app/components/base/prompt-editor/index.tsx # web/app/components/datasets/metadata/edit-metadata-batch/modal.tsx # web/app/components/header/account-dropdown/index.spec.tsx # web/app/components/share/text-generation/index.tsx # web/app/components/workflow/block-selector/tool/action-item.tsx # web/app/components/workflow/block-selector/trigger-plugin/action-item.tsx # web/app/components/workflow/hooks/use-edges-interactions.ts # web/app/components/workflow/hooks/use-nodes-interactions.ts # web/app/components/workflow/index.tsx # web/app/components/workflow/nodes/_base/components/editor/code-editor/index.tsx # web/app/components/workflow/nodes/http/components/key-value/key-value-edit/index.tsx # web/app/components/workflow/nodes/human-input/components/delivery-method/recipient/email-item.tsx # web/app/components/workflow/nodes/loop/use-interactions.ts # web/contract/router.ts # web/env.ts # 
web/eslint-suppressions.json # web/package.json # web/pnpm-lock.yaml
125 lines
4.8 KiB
Python
125 lines
4.8 KiB
Python
from collections.abc import Mapping, Sequence
|
|
from datetime import datetime
|
|
from enum import StrEnum
|
|
from typing import Any
|
|
|
|
from pydantic import Field
|
|
|
|
from dify_graph.entities import ToolCall, ToolResult
|
|
from dify_graph.entities.pause_reason import PauseReason
|
|
from dify_graph.file import File
|
|
from dify_graph.model_runtime.entities.llm_entities import LLMUsage
|
|
from dify_graph.node_events import NodeRunResult
|
|
|
|
from .base import NodeEventBase
|
|
|
|
|
|
class RunRetrieverResourceEvent(NodeEventBase):
    """Event carrying knowledge-retrieval results and the assembled context for a node run."""

    # Metadata mappings for the retrieved resources (schema defined by the retriever — verify against producer).
    retriever_resources: Sequence[Mapping[str, Any]] = Field(..., description="retriever resources")
    # Context text assembled from the retrieval results.
    context: str = Field(..., description="context")
    # Optional files associated with the context; None when no files were retrieved.
    context_files: list[File] | None = Field(default=None, description="context files")
|
|
|
|
|
|
class ModelInvokeCompletedEvent(NodeEventBase):
    """Event emitted when an LLM invocation has finished, carrying the final outputs."""

    # Complete generated text of the invocation.
    text: str
    # Token/cost usage accounting for the invocation.
    usage: LLMUsage
    # Provider-reported finish reason (e.g. stop/length); None when not supplied.
    finish_reason: str | None = None
    # Reasoning/chain-of-thought content, when the model exposes it separately.
    reasoning_content: str | None = None
    # Parsed structured output, when structured output mode produced one.
    structured_output: dict | None = None
|
|
|
|
|
|
class RunRetryEvent(NodeEventBase):
    """Event emitted when a node run failed and a retry attempt is being made."""

    # Error message from the failed attempt that triggered this retry.
    error: str = Field(..., description="error")
    # 1-based (presumably — confirm against emitter) ordinal of this retry attempt.
    retry_index: int = Field(..., description="Retry attempt number")
    # Timestamp at which the retry attempt started.
    start_at: datetime = Field(..., description="Retry start time")
|
|
|
|
|
|
class ChunkType(StrEnum):
    """Stream chunk type for LLM-related events.

    Discriminates the payload carried by a StreamChunkEvent: which optional
    fields (tool_call, tool_result, model_* fields) are expected to be set
    depends on this value.
    """

    TEXT = "text"  # Normal text streaming
    TOOL_CALL = "tool_call"  # Tool call arguments streaming
    TOOL_RESULT = "tool_result"  # Tool execution result
    THOUGHT = "thought"  # Agent thinking process (ReAct)
    THOUGHT_START = "thought_start"  # Agent thought start
    THOUGHT_END = "thought_end"  # Agent thought end
    MODEL_START = "model_start"  # Model turn started with identity info
    MODEL_END = "model_end"  # Model turn completed with metrics
|
|
|
|
|
|
class StreamChunkEvent(NodeEventBase):
    """Base stream chunk event - normal text streaming output.

    Also serves as the base class for the typed chunk events below, which
    pin ``chunk_type`` to a specific ChunkType value. The optional
    tool_call/tool_result/model_* fields are only meaningful for the
    corresponding chunk types (see per-field comments).
    """

    # Path identifying where this chunk's output belongs in the graph outputs.
    selector: Sequence[str] = Field(
        ..., description="selector identifying the output location (e.g., ['nodeA', 'text'])"
    )
    # The streamed content fragment itself.
    chunk: str = Field(..., description="the actual chunk content")
    # True on the last chunk of the stream for this selector.
    is_final: bool = Field(default=False, description="indicates if this is the last chunk")
    # Discriminator for which optional payload fields below are populated.
    chunk_type: ChunkType = Field(default=ChunkType.TEXT, description="type of the chunk")
    # Structured payload — expected when chunk_type == TOOL_CALL.
    tool_call: ToolCall | None = Field(default=None, description="structured payload for tool_call chunks")
    # Structured payload — expected when chunk_type == TOOL_RESULT.
    tool_result: ToolResult | None = Field(default=None, description="structured payload for tool_result chunks")
    # Model identity fields (when chunk_type == MODEL_START)
    model_provider: str | None = Field(default=None, description="model provider identifier")
    model_name: str | None = Field(default=None, description="model name")
    # Icon may be a URL string or a structured icon object (str | dict).
    model_icon: str | dict | None = Field(default=None, description="model provider icon")
    model_icon_dark: str | dict | None = Field(default=None, description="model provider dark icon")
    # Model metrics fields (when chunk_type == MODEL_END)
    model_usage: LLMUsage | None = Field(default=None, description="per-turn token usage")
    model_duration: float | None = Field(default=None, description="per-turn duration in seconds")
|
|
|
|
|
|
class ToolCallChunkEvent(StreamChunkEvent):
    """Tool call streaming event - tool call arguments streaming output."""

    # Pinned to TOOL_CALL; frozen so subclass instances cannot change the discriminator.
    chunk_type: ChunkType = Field(default=ChunkType.TOOL_CALL, frozen=True)
    # Redeclared from the base class to narrow the description for this event type.
    tool_call: ToolCall | None = Field(default=None, description="structured tool call payload")
|
|
|
|
|
|
class ToolResultChunkEvent(StreamChunkEvent):
    """Tool result event - tool execution result."""

    # Pinned to TOOL_RESULT; frozen so subclass instances cannot change the discriminator.
    chunk_type: ChunkType = Field(default=ChunkType.TOOL_RESULT, frozen=True)
    # Redeclared from the base class to narrow the description for this event type.
    tool_result: ToolResult | None = Field(default=None, description="structured tool result payload")
|
|
|
|
|
|
class ThoughtStartChunkEvent(StreamChunkEvent):
    """Agent thought start streaming event - Agent thinking process (ReAct)."""

    # Pinned to THOUGHT_START; frozen to keep the discriminator immutable.
    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT_START, frozen=True)
|
|
|
|
|
|
class ThoughtEndChunkEvent(StreamChunkEvent):
    """Agent thought end streaming event - Agent thinking process (ReAct)."""

    # Pinned to THOUGHT_END; frozen to keep the discriminator immutable.
    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT_END, frozen=True)
|
|
|
|
|
|
class ThoughtChunkEvent(StreamChunkEvent):
    """Agent thought streaming event - Agent thinking process (ReAct)."""

    # Pinned to THOUGHT; frozen to keep the discriminator immutable.
    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT, frozen=True)
|
|
|
|
|
|
class StreamCompletedEvent(NodeEventBase):
    """Event emitted when a node's streamed output has completed, carrying the final run result."""

    # Final result of the node run that produced the stream.
    node_run_result: NodeRunResult = Field(..., description="run result")
|
|
|
|
|
|
class PauseRequestedEvent(NodeEventBase):
    """Event emitted when a node requests that the workflow execution be paused."""

    # Why the pause was requested (e.g. awaiting human input — see PauseReason).
    reason: PauseReason = Field(..., description="pause reason")
|
|
|
|
|
|
class HumanInputFormFilledEvent(NodeEventBase):
    """Event emitted when a human input form is submitted."""

    # Display title of the human-input node whose form was submitted.
    node_title: str
    # Form content rendered with the submitted values.
    rendered_content: str
    # Identifier of the action (button) the user chose.
    action_id: str
    # Display text of the chosen action.
    action_text: str
|
|
|
|
|
|
class HumanInputFormTimeoutEvent(NodeEventBase):
    """Event emitted when a human input form times out."""

    # Display title of the human-input node whose form expired.
    node_title: str
    # Moment at which the form expired without a submission.
    expiration_time: datetime
|