mirror of
https://github.com/langgenius/dify.git
synced 2026-03-25 14:01:06 -04:00
Made-with: Cursor # Conflicts: # api/controllers/console/app/workflow_draft_variable.py # api/core/agent/cot_agent_runner.py # api/core/agent/cot_chat_agent_runner.py # api/core/agent/cot_completion_agent_runner.py # api/core/agent/fc_agent_runner.py # api/core/app/apps/advanced_chat/app_generator.py # api/core/app/apps/advanced_chat/app_runner.py # api/core/app/apps/agent_chat/app_runner.py # api/core/app/apps/workflow/app_generator.py # api/core/app/apps/workflow/app_runner.py # api/core/app/entities/app_invoke_entities.py # api/core/app/entities/queue_entities.py # api/core/llm_generator/output_parser/structured_output.py # api/core/workflow/workflow_entry.py # api/dify_graph/context/__init__.py # api/dify_graph/entities/tool_entities.py # api/dify_graph/file/file_manager.py # api/dify_graph/graph_engine/response_coordinator/coordinator.py # api/dify_graph/graph_events/node.py # api/dify_graph/node_events/node.py # api/dify_graph/nodes/agent/agent_node.py # api/dify_graph/nodes/llm/entities.py # api/dify_graph/nodes/llm/llm_utils.py # api/dify_graph/nodes/llm/node.py # api/dify_graph/nodes/question_classifier/question_classifier_node.py # api/dify_graph/runtime/graph_runtime_state.py # api/dify_graph/variables/segments.py # api/factories/variable_factory.py # api/services/variable_truncator.py # api/tests/unit_tests/utils/structured_output_parser/test_structured_output_parser.py # api/uv.lock # web/app/components/app-sidebar/app-info.tsx # web/app/components/app-sidebar/app-sidebar-dropdown.tsx # web/app/components/app/create-app-modal/index.spec.tsx # web/app/components/apps/__tests__/list.spec.tsx # web/app/components/apps/app-card.tsx # web/app/components/apps/list.tsx # web/app/components/header/account-dropdown/compliance.tsx # web/app/components/header/account-dropdown/index.tsx # web/app/components/header/account-dropdown/support.tsx # web/app/components/workflow-app/components/workflow-onboarding-modal/index.tsx # 
web/app/components/workflow/panel/debug-and-preview/hooks.ts # web/contract/console/apps.ts # web/contract/router.ts # web/eslint-suppressions.json # web/next.config.ts # web/pnpm-lock.yaml
125 lines
4.8 KiB
Python
125 lines
4.8 KiB
Python
from collections.abc import Sequence
|
|
from datetime import datetime
|
|
from enum import StrEnum
|
|
|
|
from pydantic import Field
|
|
|
|
from core.rag.entities.citation_metadata import RetrievalSourceMetadata
|
|
from dify_graph.entities import ToolCall, ToolResult
|
|
from dify_graph.entities.pause_reason import PauseReason
|
|
from dify_graph.file import File
|
|
from dify_graph.model_runtime.entities.llm_entities import LLMUsage
|
|
from dify_graph.node_events import NodeRunResult
|
|
|
|
from .base import NodeEventBase
|
|
|
|
|
|
class RunRetrieverResourceEvent(NodeEventBase):
    """Node event carrying knowledge-retrieval output.

    Bundles the matched source metadata together with the assembled context
    text (and any files) so downstream consumers can attach citations.
    """

    # Metadata for each retrieved source (used for citation rendering).
    retriever_resources: Sequence[RetrievalSourceMetadata] = Field(..., description="retriever resources")
    # Context text produced from the retrieval step.
    context: str = Field(..., description="context")
    # Files attached to the context, if the retrieval produced any.
    context_files: list[File] | None = Field(default=None, description="context files")
|
|
|
|
|
|
class ModelInvokeCompletedEvent(NodeEventBase):
    """Node event emitted when a model invocation has completed.

    Carries the final generated text plus usage accounting and, when the
    model produced them, the finish reason, reasoning trace, and any
    structured output.
    """

    # Full text produced by the model for this invocation.
    text: str
    # Token/usage accounting reported by the model runtime.
    usage: LLMUsage
    # Provider-reported finish reason, if one was given.
    finish_reason: str | None = None
    # Reasoning/"thinking" content, when the model emits it separately.
    reasoning_content: str | None = None
    # Parsed structured output, when structured output mode was in effect.
    structured_output: dict | None = None
|
|
|
|
|
|
class RunRetryEvent(NodeEventBase):
    """Node event signalling that a failed run is being retried."""

    # Error message from the failed attempt that triggered this retry.
    error: str = Field(..., description="error")
    # Which retry attempt this is (1-based attempt counter — confirm with emitter).
    retry_index: int = Field(..., description="Retry attempt number")
    # Timestamp at which the retry attempt started.
    start_at: datetime = Field(..., description="Retry start time")
|
|
|
|
|
|
class ChunkType(StrEnum):
|
|
"""Stream chunk type for LLM-related events."""
|
|
|
|
TEXT = "text" # Normal text streaming
|
|
TOOL_CALL = "tool_call" # Tool call arguments streaming
|
|
TOOL_RESULT = "tool_result" # Tool execution result
|
|
THOUGHT = "thought" # Agent thinking process (ReAct)
|
|
THOUGHT_START = "thought_start" # Agent thought start
|
|
THOUGHT_END = "thought_end" # Agent thought end
|
|
MODEL_START = "model_start" # Model turn started with identity info
|
|
MODEL_END = "model_end" # Model turn completed with metrics
|
|
|
|
|
|
class StreamChunkEvent(NodeEventBase):
    """Base stream chunk event - normal text streaming output.

    Subclasses specialise ``chunk_type`` (tool calls/results, agent
    thoughts, model start/end markers); the optional fields below are only
    populated for the chunk types noted in their comments.
    """

    # Output location this chunk belongs to, e.g. ['nodeA', 'text'].
    selector: Sequence[str] = Field(
        ..., description="selector identifying the output location (e.g., ['nodeA', 'text'])"
    )
    # Raw chunk payload (text delta, serialized args, etc. depending on type).
    chunk: str = Field(..., description="the actual chunk content")
    # True on the terminating chunk of a stream.
    is_final: bool = Field(default=False, description="indicates if this is the last chunk")
    # Discriminator telling consumers how to interpret this chunk.
    chunk_type: ChunkType = Field(default=ChunkType.TEXT, description="type of the chunk")
    # Structured payload, set for TOOL_CALL chunks.
    tool_call: ToolCall | None = Field(default=None, description="structured payload for tool_call chunks")
    # Structured payload, set for TOOL_RESULT chunks.
    tool_result: ToolResult | None = Field(default=None, description="structured payload for tool_result chunks")
    # Model identity fields (when chunk_type == MODEL_START)
    model_provider: str | None = Field(default=None, description="model provider identifier")
    model_name: str | None = Field(default=None, description="model name")
    model_icon: str | dict | None = Field(default=None, description="model provider icon")
    model_icon_dark: str | dict | None = Field(default=None, description="model provider dark icon")
    # Model metrics fields (when chunk_type == MODEL_END)
    model_usage: LLMUsage | None = Field(default=None, description="per-turn token usage")
    model_duration: float | None = Field(default=None, description="per-turn duration in seconds")
|
|
|
|
|
|
class ToolCallChunkEvent(StreamChunkEvent):
    """Stream chunk specialised for tool-call argument streaming.

    Pins ``chunk_type`` to ``TOOL_CALL``; the structured call payload rides
    in ``tool_call``.
    """

    # Frozen discriminator: this event is always a tool_call chunk.
    chunk_type: ChunkType = Field(default=ChunkType.TOOL_CALL, frozen=True)
    # Structured representation of the (possibly partial) tool call.
    tool_call: ToolCall | None = Field(default=None, description="structured tool call payload")
|
|
|
|
|
|
class ToolResultChunkEvent(StreamChunkEvent):
    """Stream chunk specialised for tool execution results.

    Pins ``chunk_type`` to ``TOOL_RESULT``; the structured result payload
    rides in ``tool_result``.
    """

    # Frozen discriminator: this event is always a tool_result chunk.
    chunk_type: ChunkType = Field(default=ChunkType.TOOL_RESULT, frozen=True)
    # Structured representation of the tool's execution result.
    tool_result: ToolResult | None = Field(default=None, description="structured tool result payload")
|
|
|
|
|
|
class ThoughtStartChunkEvent(StreamChunkEvent):
    """Marks the beginning of an agent thinking phase (ReAct)."""

    # Frozen discriminator: this event is always a thought_start chunk.
    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT_START, frozen=True)
|
|
|
|
|
|
class ThoughtEndChunkEvent(StreamChunkEvent):
    """Marks the end of an agent thinking phase (ReAct)."""

    # Frozen discriminator: this event is always a thought_end chunk.
    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT_END, frozen=True)
|
|
|
|
|
|
class ThoughtChunkEvent(StreamChunkEvent):
    """Streams agent thinking-process content (ReAct)."""

    # Frozen discriminator: this event is always a thought chunk.
    chunk_type: ChunkType = Field(default=ChunkType.THOUGHT, frozen=True)
|
|
|
|
|
|
class StreamCompletedEvent(NodeEventBase):
    """Signals that streaming finished, carrying the node's final run result."""

    # Final result of the node run that produced the stream.
    node_run_result: NodeRunResult = Field(..., description="run result")
|
|
|
|
|
|
class PauseRequestedEvent(NodeEventBase):
    """Requests that execution be paused, with the reason attached."""

    # Why the pause was requested (see PauseReason).
    reason: PauseReason = Field(..., description="pause reason")
|
|
|
|
|
|
class HumanInputFormFilledEvent(NodeEventBase):
    """Event emitted when a human input form is submitted."""

    # Title of the node whose form was submitted.
    node_title: str
    # Rendered content of the submitted form.
    rendered_content: str
    # Identifier of the action the user chose.
    action_id: str
    # Display text of the chosen action.
    action_text: str
|
|
|
|
|
|
class HumanInputFormTimeoutEvent(NodeEventBase):
    """Event emitted when a human input form times out."""

    # Title of the node whose form expired.
    node_title: str
    # Moment at which the form expired.
    expiration_time: datetime
|