mirror of https://github.com/langgenius/dify.git, synced 2026-02-19 07:01:42 -05:00

refactor: remove streaming structured output from invoke_llm_with_pydantic_model
Signed-off-by: Stream <Stream_2@qq.com>
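
All of the hunks below make the same mechanical change: the `stream` parameter is deleted from `invoke_llm_with_pydantic_model`, every caller stops passing `stream=...`, and the helper's internal streaming branch is removed. As a minimal sketch of the call shape after this commit: the import path and keyword arguments are taken from the hunks below, while `ExampleOutput`, `generate_example`, and the argument objects are illustrative only, not part of the Dify codebase (this assumes it runs inside Dify, where that import resolves):

from pydantic import BaseModel

from core.llm_generator.output_parser.structured_output import invoke_llm_with_pydantic_model


class ExampleOutput(BaseModel):
    # Hypothetical schema; any Pydantic model can be passed as output_model.
    name: str


def generate_example(model_instance, model_schema, prompt_messages, tenant_id):
    # No `stream=` argument anymore: the helper always returns a validated
    # instance of the requested Pydantic model.
    result = invoke_llm_with_pydantic_model(
        provider=model_instance.provider,
        model_schema=model_schema,
        model_instance=model_instance,
        prompt_messages=prompt_messages,
        output_model=ExampleOutput,
        model_parameters={},
        tenant_id=tenant_id,
    )
    return result.name
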
@@ -471,7 +471,6 @@ class LLMGenerator:
             prompt_messages=complete_messages,
             output_model=CodeNodeStructuredOutput,
             model_parameters=model_parameters,
-            stream=True,
             tenant_id=tenant_id,
         )

@@ -553,16 +552,10 @@ class LLMGenerator:

         completion_params = model_config.get("completion_params", {}) if model_config else {}
         try:
-            response = invoke_llm_with_pydantic_model(
-                provider=model_instance.provider,
-                model_schema=model_schema,
-                model_instance=model_instance,
-                prompt_messages=prompt_messages,
-                output_model=SuggestedQuestionsOutput,
-                model_parameters=completion_params,
-                stream=True,
-                tenant_id=tenant_id,
-            )
+            response = invoke_llm_with_pydantic_model(provider=model_instance.provider, model_schema=model_schema,
+                                                      model_instance=model_instance, prompt_messages=prompt_messages,
+                                                      output_model=SuggestedQuestionsOutput,
+                                                      model_parameters=completion_params, tenant_id=tenant_id)

             return {"questions": response.questions, "error": ""}

@@ -842,15 +835,11 @@ Generate {language} code to extract/transform available variables for the target
         try:
             from core.llm_generator.output_parser.structured_output import invoke_llm_with_pydantic_model

-            response = invoke_llm_with_pydantic_model(
-                provider=model_instance.provider,
-                model_schema=model_schema,
-                model_instance=model_instance,
-                prompt_messages=list(prompt_messages),
-                output_model=InstructionModifyOutput,
-                model_parameters=model_parameters,
-                stream=True,
-            )
+            response = invoke_llm_with_pydantic_model(provider=model_instance.provider, model_schema=model_schema,
+                                                      model_instance=model_instance,
+                                                      prompt_messages=list(prompt_messages),
+                                                      output_model=InstructionModifyOutput,
+                                                      model_parameters=model_parameters)
             return response.model_dump(mode="python")
         except InvokeError as e:
             error = str(e)

@@ -262,7 +262,6 @@ def invoke_llm_with_pydantic_model(
     model_parameters: Mapping | None = None,
     tools: Sequence[PromptMessageTool] | None = None,
     stop: list[str] | None = None,
-    stream: bool = True,  # Some model plugin implementations don't support stream=False
     user: str | None = None,
     callbacks: list[Callback] | None = None,
     tenant_id: str | None = None,

@@ -281,36 +280,6 @@ def invoke_llm_with_pydantic_model(
     """
     json_schema = _schema_from_pydantic(output_model)

-    if stream:
-        result_generator = invoke_llm_with_structured_output(
-            provider=provider,
-            model_schema=model_schema,
-            model_instance=model_instance,
-            prompt_messages=prompt_messages,
-            json_schema=json_schema,
-            model_parameters=model_parameters,
-            tools=tools,
-            stop=stop,
-            stream=True,
-            user=user,
-            callbacks=callbacks,
-            tenant_id=tenant_id,
-        )
-
-        # Consume the generator to get the final chunk with structured_output
-        last_chunk: LLMResultChunkWithStructuredOutput | None = None
-        for chunk in result_generator:
-            last_chunk = chunk
-
-        if last_chunk is None:
-            raise OutputParserError("No chunks received from LLM")
-
-        structured_output = last_chunk.structured_output
-        if structured_output is None:
-            raise OutputParserError("Structured output is empty")
-
-        return _validate_structured_output(output_model, structured_output)
-
     result = invoke_llm_with_structured_output(
         provider=provider,
         model_schema=model_schema,

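The deleted branch above is the heart of the refactor: it called the underlying `invoke_llm_with_structured_output` in streaming mode, then drained the chunk generator just to read `structured_output` off the final chunk, so streaming added nothing for this caller. A self-contained sketch of that drain-the-generator pattern, with illustrative names rather than Dify types:

from collections.abc import Iterator


def last_item(chunks: Iterator[dict]) -> dict:
    # Equivalent to the removed loop: iterate the whole stream,
    # remembering only the most recent chunk.
    last = None
    for chunk in chunks:
        last = chunk
    if last is None:
        raise ValueError("No chunks received")
    return last


stream = iter([{"delta": "{"}, {"delta": "}", "structured_output": {"name": "x"}}])
print(last_item(stream)["structured_output"])  # {'name': 'x'}
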
@@ -492,7 +492,6 @@ def test_structured_output_with_pydantic_model_non_streaming():
         model_instance=model_instance,
         prompt_messages=prompt_messages,
         output_model=ExampleOutput,
-        stream=False,
     )

     assert isinstance(result, ExampleOutput)

@@ -532,8 +531,7 @@ def test_structured_output_with_pydantic_model_streaming():
         model_schema=model_schema,
         model_instance=model_instance,
         prompt_messages=[UserPromptMessage(content="Return a JSON object with name.")],
-        output_model=ExampleOutput,
-        stream=True,
+        output_model=ExampleOutput
     )

     assert isinstance(result, ExampleOutput)

@@ -555,8 +553,7 @@ def test_structured_output_with_pydantic_model_validation_error():
         model_schema=model_schema,
         model_instance=model_instance,
         prompt_messages=[UserPromptMessage(content="test")],
-        output_model=ExampleOutput,
-        stream=False,
+        output_model=ExampleOutput
     )

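The test hunks above make the same edit as the call sites: the `stream=` argument disappears while `output_model=ExampleOutput` stays. The validation-error test still has to assert that output not matching the schema raises; judging by `_validate_structured_output` in the earlier hunk, that check presumably bottoms out in Pydantic's own validation. A minimal, Dify-independent sketch of that failure mode (test name and payload are illustrative):

import pytest
from pydantic import BaseModel, ValidationError


class ExampleOutput(BaseModel):
    name: str


def test_schema_mismatch_raises():
    # A payload missing the required `name` field fails validation,
    # regardless of whether it arrived via streaming or not.
    with pytest.raises(ValidationError):
        ExampleOutput.model_validate({"unexpected": 123})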