chore: Avoid directly using OpenAI dependencies (#26590)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Author: Will
Date: 2025-10-06 10:40:38 +08:00
Committed by: GitHub
Parent: 4a475bf1cd
Commit: d89c5f7146
7 changed files with 2 additions and 44 deletions

View File

@@ -1,7 +1,6 @@
 from typing import TYPE_CHECKING, Any, Optional
-from openai import BaseModel
-from pydantic import Field
+from pydantic import BaseModel, Field
 # Import InvokeFrom locally to avoid circular import
 from core.app.entities.app_invoke_entities import InvokeFrom
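Note on the import swap: openai.BaseModel is essentially the SDK's own subclass of pydantic.BaseModel (here it had almost certainly arrived via an editor auto-import), so plain Pydantic models cover everything these entity classes need. A minimal sketch of the resulting pattern; the class and field names below are illustrative, not taken from the changed file:

from typing import Optional

from pydantic import BaseModel, Field


class ExampleToolEntity(BaseModel):
    # Illustrative fields only; the real entities live in the modules touched above.
    tenant_id: Optional[str] = Field(default=None, description="Owning tenant, if any")
    invoke_from: Optional[str] = Field(default=None, description="Origin of the invocation")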

View File

@@ -1,7 +1,6 @@
 from typing import Any
-from openai import BaseModel
-from pydantic import Field
+from pydantic import BaseModel, Field
 from core.app.entities.app_invoke_entities import InvokeFrom
 from core.tools.entities.tool_entities import CredentialType, ToolInvokeFrom

View File

@@ -4,7 +4,6 @@ from dify_app import DifyApp
 def init_app(app: DifyApp):
     if dify_config.SENTRY_DSN:
-        import openai
         import sentry_sdk
         from langfuse import parse_error  # type: ignore
         from sentry_sdk.integrations.celery import CeleryIntegration
@@ -28,7 +27,6 @@ def init_app(app: DifyApp):
                 HTTPException,
                 ValueError,
                 FileNotFoundError,
-                openai.APIStatusError,
                 InvokeRateLimitError,
                 parse_error.defaultErrorResponse,
             ],
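With the SDK no longer imported, openai.APIStatusError cannot stay in the Sentry ignore list, so provider failures have to be filtered through the project's own exception types such as InvokeRateLimitError. A minimal sketch of the trimmed initialization, assuming a placeholder DSN and a stand-in error class (neither is copied from the extension module):

import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.flask import FlaskIntegration
from werkzeug.exceptions import HTTPException


class InvokeRateLimitError(Exception):
    """Stand-in for the project's rate-limit error class."""


sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[FlaskIntegration(), CeleryIntegration()],
    ignore_errors=[
        HTTPException,
        ValueError,
        FileNotFoundError,
        InvokeRateLimitError,  # openai.APIStatusError is intentionally gone
    ],
)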

View File

@@ -37,7 +37,6 @@ dependencies = [
"mailchimp-transactional~=1.0.50",
"markdown~=3.5.1",
"numpy~=1.26.4",
"openai~=1.61.0",
"openpyxl~=3.1.5",
"opik~=1.7.25",
"opentelemetry-api==1.27.0",

View File

@@ -2,8 +2,6 @@ import uuid
 from collections.abc import Generator, Mapping
 from typing import Any, Union
-from openai._exceptions import RateLimitError
 from configs import dify_config
 from core.app.apps.advanced_chat.app_generator import AdvancedChatAppGenerator
 from core.app.apps.agent_chat.app_generator import AgentChatAppGenerator
@@ -122,8 +120,6 @@ class AppGenerateService:
                 )
             else:
                 raise ValueError(f"Invalid app mode {app_model.mode}")
-        except RateLimitError as e:
-            raise InvokeRateLimitError(str(e))
         except Exception:
             rate_limit.exit(request_id)
             raise
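With the except RateLimitError clause gone, AppGenerateService only knows about the project-level InvokeRateLimitError; whichever layer still talks to the model provider is expected to raise that error itself. A hedged sketch of what such a translation boundary could look like; the helper name and the attribute check are assumptions, not code from this commit:

class InvokeRateLimitError(Exception):
    """Stand-in for the project's rate-limit error class."""


def invoke_with_rate_limit_mapping(call_provider, *args, **kwargs):
    # Keep provider-specific exception types behind this boundary so callers
    # such as AppGenerateService never import a provider SDK directly.
    try:
        return call_provider(*args, **kwargs)
    except Exception as e:
        if getattr(e, "status_code", None) == 429 or "rate limit" in str(e).lower():
            raise InvokeRateLimitError(str(e)) from e
        raise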

View File

@@ -3,7 +3,6 @@ from unittest.mock import MagicMock, patch
 import pytest
 from faker import Faker
-from openai._exceptions import RateLimitError
 from core.app.entities.app_invoke_entities import InvokeFrom
 from models.model import EndUser
@@ -484,36 +483,6 @@ class TestAppGenerateService:
         # Verify error message
         assert "Rate limit exceeded" in str(exc_info.value)
-    def test_generate_with_rate_limit_error_from_openai(
-        self, db_session_with_containers, mock_external_service_dependencies
-    ):
-        """
-        Test generation when OpenAI rate limit error occurs.
-        """
-        fake = Faker()
-        app, account = self._create_test_app_and_account(
-            db_session_with_containers, mock_external_service_dependencies, mode="completion"
-        )
-        # Setup completion generator to raise RateLimitError
-        mock_response = MagicMock()
-        mock_response.request = MagicMock()
-        mock_external_service_dependencies["completion_generator"].return_value.generate.side_effect = RateLimitError(
-            "Rate limit exceeded", response=mock_response, body=None
-        )
-        # Setup test arguments
-        args = {"inputs": {"query": fake.text(max_nb_chars=50)}, "response_mode": "streaming"}
-        # Execute the method under test and expect rate limit error
-        with pytest.raises(InvokeRateLimitError) as exc_info:
-            AppGenerateService.generate(
-                app_model=app, user=account, args=args, invoke_from=InvokeFrom.SERVICE_API, streaming=True
-            )
-        # Verify error message
-        assert "Rate limit exceeded" in str(exc_info.value)
     def test_generate_with_invalid_app_mode(self, db_session_with_containers, mock_external_service_dependencies):
         """
         Test generation with invalid app mode.
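The deleted test constructed an openai RateLimitError directly, which is no longer possible without the SDK; the context lines above come from a neighbouring test that already asserts on the same "Rate limit exceeded" message. If equivalent standalone coverage is wanted, a sketch along these lines could work (the error stub, test name, and wiring are assumptions, not taken from the test module):

from unittest.mock import MagicMock

import pytest


class InvokeRateLimitError(Exception):
    """Stand-in for the project's rate-limit error class."""


def test_generate_surfaces_invoke_rate_limit_error():
    # The MagicMock plays the role of the mocked app generator fixture.
    generator = MagicMock()
    generator.generate.side_effect = InvokeRateLimitError("Rate limit exceeded")

    with pytest.raises(InvokeRateLimitError) as exc_info:
        generator.generate(args={"inputs": {"query": "hello"}}, streaming=True)

    assert "Rate limit exceeded" in str(exc_info.value)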

api/uv.lock generated
View File

@@ -1314,7 +1314,6 @@ dependencies = [
{ name = "mailchimp-transactional" },
{ name = "markdown" },
{ name = "numpy" },
{ name = "openai" },
{ name = "openpyxl" },
{ name = "opentelemetry-api" },
{ name = "opentelemetry-distro" },
@@ -1508,7 +1507,6 @@ requires-dist = [
{ name = "mailchimp-transactional", specifier = "~=1.0.50" },
{ name = "markdown", specifier = "~=3.5.1" },
{ name = "numpy", specifier = "~=1.26.4" },
{ name = "openai", specifier = "~=1.61.0" },
{ name = "openpyxl", specifier = "~=3.1.5" },
{ name = "opentelemetry-api", specifier = "==1.27.0" },
{ name = "opentelemetry-distro", specifier = "==0.48b0" },