Mirror of https://github.com/langgenius/dify.git (synced 2025-12-19 17:27:16 -05:00)

Merge branch 'main' into feature/smtp-oauth2-support
@@ -1,4 +1,4 @@
-FROM mcr.microsoft.com/devcontainers/python:3.12-bullseye
+FROM mcr.microsoft.com/devcontainers/python:3.12-bookworm

 RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
     && apt-get -y install libgmp-dev libmpfr-dev libmpc-dev
Makefile | 1 -

@@ -26,7 +26,6 @@ prepare-web:
 	@echo "🌐 Setting up web environment..."
 	@cp -n web/.env.example web/.env 2>/dev/null || echo "Web .env already exists"
 	@cd web && pnpm install
-	@cd web && pnpm build
 	@echo "✅ Web environment prepared (not started)"

 # Step 3: Prepare API environment
README.md | 24 ++++++++++++------------

@@ -40,18 +40,18 @@
 <p align="center">
   <a href="./README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
-  <a href="./README/README_TW.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
-  <a href="./README/README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
-  <a href="./README/README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
-  <a href="./README/README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
-  <a href="./README/README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
-  <a href="./README/README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
-  <a href="./README/README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
-  <a href="./README/README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
-  <a href="./README/README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
-  <a href="./README/README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
-  <a href="./README/README_DE.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
-  <a href="./README/README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
+  <a href="./docs/zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
+  <a href="./docs/zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
+  <a href="./docs/ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
+  <a href="./docs/es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
+  <a href="./docs/fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
+  <a href="./docs/tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
+  <a href="./docs/ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
+  <a href="./docs/ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
+  <a href="./docs/tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
+  <a href="./docs/vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
+  <a href="./docs/de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
+  <a href="./docs/bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
 </p>

 Dify is an open-source platform for developing LLM applications. Its intuitive interface combines agentic AI workflows, RAG pipelines, agent capabilities, model management, observability features, and more—allowing you to quickly move from prototype to production.
@@ -19,6 +19,7 @@ from core.ops.ops_trace_manager import OpsTraceManager
 from extensions.ext_database import db
 from fields.app_fields import app_detail_fields, app_detail_fields_with_site, app_pagination_fields
 from libs.login import login_required
+from libs.validators import validate_description_length
 from models import Account, App
 from services.app_dsl_service import AppDslService, ImportMode
 from services.app_service import AppService
@@ -28,12 +29,6 @@ from services.feature_service import FeatureService
 ALLOW_CREATE_APP_MODES = ["chat", "agent-chat", "advanced-chat", "workflow", "completion"]


-def _validate_description_length(description):
-    if description and len(description) > 400:
-        raise ValueError("Description cannot exceed 400 characters.")
-    return description
-
-
 @console_ns.route("/apps")
 class AppListApi(Resource):
     @api.doc("list_apps")
@@ -138,7 +133,7 @@ class AppListApi(Resource):
         """Create app"""
         parser = reqparse.RequestParser()
         parser.add_argument("name", type=str, required=True, location="json")
-        parser.add_argument("description", type=_validate_description_length, location="json")
+        parser.add_argument("description", type=validate_description_length, location="json")
         parser.add_argument("mode", type=str, choices=ALLOW_CREATE_APP_MODES, location="json")
         parser.add_argument("icon_type", type=str, location="json")
         parser.add_argument("icon", type=str, location="json")
@@ -219,7 +214,7 @@ class AppApi(Resource):

         parser = reqparse.RequestParser()
         parser.add_argument("name", type=str, required=True, nullable=False, location="json")
-        parser.add_argument("description", type=_validate_description_length, location="json")
+        parser.add_argument("description", type=validate_description_length, location="json")
         parser.add_argument("icon_type", type=str, location="json")
         parser.add_argument("icon", type=str, location="json")
         parser.add_argument("icon_background", type=str, location="json")
@@ -297,7 +292,7 @@ class AppCopyApi(Resource):

         parser = reqparse.RequestParser()
         parser.add_argument("name", type=str, location="json")
-        parser.add_argument("description", type=_validate_description_length, location="json")
+        parser.add_argument("description", type=validate_description_length, location="json")
         parser.add_argument("icon_type", type=str, location="json")
         parser.add_argument("icon", type=str, location="json")
         parser.add_argument("icon_background", type=str, location="json")
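The app, dataset, and service-API hunks in this merge all swap per-module `_validate_description_length` helpers for one shared validator imported from `libs.validators`. The body of `libs/validators.py` itself never appears in the diff, so the following is only a sketch reconstructed from the deleted copies:

    # libs/validators.py (assumed contents, reconstructed from the removed helpers)
    def validate_description_length(description: str | None) -> str | None:
        # Used as a reqparse `type=` callable: raising ValueError becomes a 400 response.
        if description and len(description) > 400:
            raise ValueError("Description cannot exceed 400 characters.")
        return description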
@@ -1,4 +1,5 @@
-import flask_restx
+from typing import Any, cast
+
 from flask import request
 from flask_login import current_user
 from flask_restx import Resource, fields, marshal, marshal_with, reqparse
@@ -30,24 +31,20 @@ from fields.app_fields import related_app_list
 from fields.dataset_fields import dataset_detail_fields, dataset_query_detail_fields
 from fields.document_fields import document_status_fields
 from libs.login import login_required
+from libs.validators import validate_description_length
 from models import ApiToken, Dataset, Document, DocumentSegment, UploadFile
 from models.account import Account
 from models.dataset import DatasetPermissionEnum
 from models.provider_ids import ModelProviderID
 from services.dataset_service import DatasetPermissionService, DatasetService, DocumentService


-def _validate_name(name):
+def _validate_name(name: str) -> str:
     if not name or len(name) < 1 or len(name) > 40:
         raise ValueError("Name must be between 1 to 40 characters.")
     return name


-def _validate_description_length(description):
-    if description and len(description) > 400:
-        raise ValueError("Description cannot exceed 400 characters.")
-    return description
-
-
 @console_ns.route("/datasets")
 class DatasetListApi(Resource):
     @api.doc("get_datasets")
@@ -92,7 +89,7 @@ class DatasetListApi(Resource):
         for embedding_model in embedding_models:
             model_names.append(f"{embedding_model.model}:{embedding_model.provider.provider}")

-        data = marshal(datasets, dataset_detail_fields)
+        data = cast(list[dict[str, Any]], marshal(datasets, dataset_detail_fields))
         for item in data:
             # convert embedding_model_provider to plugin standard format
             if item["indexing_technique"] == "high_quality" and item["embedding_model_provider"]:
@@ -147,7 +144,7 @@ class DatasetListApi(Resource):
         )
         parser.add_argument(
             "description",
-            type=_validate_description_length,
+            type=validate_description_length,
             nullable=True,
             required=False,
             default="",
@@ -192,7 +189,7 @@ class DatasetListApi(Resource):
             name=args["name"],
             description=args["description"],
             indexing_technique=args["indexing_technique"],
-            account=current_user,
+            account=cast(Account, current_user),
             permission=DatasetPermissionEnum.ONLY_ME,
             provider=args["provider"],
             external_knowledge_api_id=args["external_knowledge_api_id"],
@@ -224,7 +221,7 @@ class DatasetApi(Resource):
             DatasetService.check_dataset_permission(dataset, current_user)
         except services.errors.account.NoPermissionError as e:
             raise Forbidden(str(e))
-        data = marshal(dataset, dataset_detail_fields)
+        data = cast(dict[str, Any], marshal(dataset, dataset_detail_fields))
         if dataset.indexing_technique == "high_quality":
             if dataset.embedding_model_provider:
                 provider_id = ModelProviderID(dataset.embedding_model_provider)
@@ -288,7 +285,7 @@ class DatasetApi(Resource):
             help="type is required. Name must be between 1 to 40 characters.",
             type=_validate_name,
         )
-        parser.add_argument("description", location="json", store_missing=False, type=_validate_description_length)
+        parser.add_argument("description", location="json", store_missing=False, type=validate_description_length)
         parser.add_argument(
             "indexing_technique",
             type=str,
@@ -369,7 +366,7 @@ class DatasetApi(Resource):
         if dataset is None:
             raise NotFound("Dataset not found.")

-        result_data = marshal(dataset, dataset_detail_fields)
+        result_data = cast(dict[str, Any], marshal(dataset, dataset_detail_fields))
         tenant_id = current_user.current_tenant_id

         if data.get("partial_member_list") and data.get("permission") == "partial_members":
@@ -688,7 +685,7 @@ class DatasetApiKeyApi(Resource):
         )

         if current_key_count >= self.max_keys:
-            flask_restx.abort(
+            api.abort(
                 400,
                 message=f"Cannot create more than {self.max_keys} API keys for this resource type.",
                 code="max_keys_exceeded",
@@ -733,7 +730,7 @@ class DatasetApiDeleteApi(Resource):
         )

         if key is None:
-            flask_restx.abort(404, message="API key not found")
+            api.abort(404, message="API key not found")

         db.session.query(ApiToken).where(ApiToken.id == api_key_id).delete()
         db.session.commit()
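Several hunks above replace bare `current_user` arguments with `cast(Account, current_user)`. A minimal sketch of why the cast is needed, assuming `current_user` is flask_login's `LocalProxy`, which is typed more loosely than the concrete `Account` model:

    from typing import cast

    from flask_login import current_user

    from models.account import Account

    def _current_account() -> Account:
        # cast() is a no-op at runtime; it only tells the type checker that the
        # proxy resolves to an Account on console routes guarded by @login_required.
        return cast(Account, current_user)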
@@ -55,6 +55,7 @@ from fields.document_fields import (
 from libs.datetime_utils import naive_utc_now
 from libs.login import login_required
 from models import Dataset, DatasetProcessRule, Document, DocumentSegment, UploadFile
+from models.account import Account
 from models.dataset import DocumentPipelineExecutionLog
 from services.dataset_service import DatasetService, DocumentService
 from services.entities.knowledge_entities.knowledge_entities import KnowledgeConfig
@@ -418,7 +419,9 @@ class DatasetInitApi(Resource):

         try:
             dataset, documents, batch = DocumentService.save_document_without_dataset_id(
-                tenant_id=current_user.current_tenant_id, knowledge_config=knowledge_config, account=current_user
+                tenant_id=current_user.current_tenant_id,
+                knowledge_config=knowledge_config,
+                account=cast(Account, current_user),
             )
         except ProviderTokenNotInitError as ex:
             raise ProviderNotInitializeError(ex.description)
@@ -452,7 +455,7 @@ class DocumentIndexingEstimateApi(DocumentResource):
             raise DocumentAlreadyFinishedError()

         data_process_rule = document.dataset_process_rule
-        data_process_rule_dict = data_process_rule.to_dict()
+        data_process_rule_dict = data_process_rule.to_dict() if data_process_rule else {}

         response = {"tokens": 0, "total_price": 0, "currency": "USD", "total_segments": 0, "preview": []}

@@ -514,7 +517,7 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
         if not documents:
             return {"tokens": 0, "total_price": 0, "currency": "USD", "total_segments": 0, "preview": []}, 200
         data_process_rule = documents[0].dataset_process_rule
-        data_process_rule_dict = data_process_rule.to_dict()
+        data_process_rule_dict = data_process_rule.to_dict() if data_process_rule else {}
         extract_settings = []
         for document in documents:
             if document.indexing_status in {"completed", "error"}:
@@ -753,7 +756,7 @@ class DocumentApi(DocumentResource):
             }
         else:
             dataset_process_rules = DatasetService.get_process_rules(dataset_id)
-            document_process_rules = document.dataset_process_rule.to_dict()
+            document_process_rules = document.dataset_process_rule.to_dict() if document.dataset_process_rule else {}
             data_source_info = document.data_source_detail_dict
             response = {
                 "id": document.id,
@@ -1073,7 +1076,9 @@ class DocumentRenameApi(DocumentResource):
         if not current_user.is_dataset_editor:
             raise Forbidden()
         dataset = DatasetService.get_dataset(dataset_id)
-        DatasetService.check_dataset_operator_permission(current_user, dataset)
+        if not dataset:
+            raise NotFound("Dataset not found.")
+        DatasetService.check_dataset_operator_permission(cast(Account, current_user), dataset)
         parser = reqparse.RequestParser()
         parser.add_argument("name", type=str, required=True, nullable=False, location="json")
         args = parser.parse_args()
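The `to_dict() if ... else {}` edits above guard an optional relationship. The pattern in isolation, assuming `dataset_process_rule` is a nullable SQLAlchemy relationship whose target exposes `to_dict()`:

    def process_rule_dict(document) -> dict:
        rule = document.dataset_process_rule  # None for documents created without a rule
        return rule.to_dict() if rule else {}  # empty dict instead of an AttributeError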
@@ -392,7 +392,12 @@ class DatasetDocumentSegmentBatchImportApi(Resource):
             # send batch add segments task
             redis_client.setnx(indexing_cache_key, "waiting")
             batch_create_segment_to_index_task.delay(
-                str(job_id), upload_file_id, dataset_id, document_id, current_user.current_tenant_id, current_user.id
+                str(job_id),
+                upload_file_id,
+                dataset_id,
+                document_id,
+                current_user.current_tenant_id,
+                current_user.id,
             )
         except Exception as e:
             return {"error": str(e)}, 500
@@ -468,7 +473,8 @@ class ChildChunkAddApi(Resource):
         parser.add_argument("content", type=str, required=True, nullable=False, location="json")
         args = parser.parse_args()
         try:
-            child_chunk = SegmentService.create_child_chunk(args.get("content"), segment, document, dataset)
+            content = args["content"]
+            child_chunk = SegmentService.create_child_chunk(content, segment, document, dataset)
         except ChildChunkIndexingServiceError as e:
             raise ChildChunkIndexingError(str(e))
         return {"data": marshal(child_chunk, child_chunk_fields)}, 200
@@ -557,7 +563,8 @@ class ChildChunkAddApi(Resource):
         parser.add_argument("chunks", type=list, required=True, nullable=False, location="json")
         args = parser.parse_args()
         try:
-            chunks = [ChildChunkUpdateArgs(**chunk) for chunk in args.get("chunks")]
+            chunks_data = args["chunks"]
+            chunks = [ChildChunkUpdateArgs(**chunk) for chunk in chunks_data]
             child_chunks = SegmentService.update_child_chunks(chunks, segment, document, dataset)
         except ChildChunkIndexingServiceError as e:
             raise ChildChunkIndexingError(str(e))
@@ -674,9 +681,8 @@ class ChildChunkUpdateApi(Resource):
         parser.add_argument("content", type=str, required=True, nullable=False, location="json")
         args = parser.parse_args()
         try:
-            child_chunk = SegmentService.update_child_chunk(
-                args.get("content"), child_chunk, segment, document, dataset
-            )
+            content = args["content"]
+            child_chunk = SegmentService.update_child_chunk(content, child_chunk, segment, document, dataset)
         except ChildChunkIndexingServiceError as e:
             raise ChildChunkIndexingError(str(e))
         return {"data": marshal(child_chunk, child_chunk_fields)}, 200
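These segment hunks also replace `args.get(...)` with `args[...]` for required arguments. A sketch of the distinction, assuming flask_restx's reqparse namespace, whose `.get()` is typed as optional even when `required=True`:

    from flask_restx import reqparse

    # Inside a request handler:
    parser = reqparse.RequestParser()
    parser.add_argument("content", type=str, required=True, nullable=False, location="json")
    args = parser.parse_args()
    content = args["content"]  # required by the parser, so indexing never yields None here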
@@ -1,3 +1,5 @@
+from typing import cast
+
 from flask import request
 from flask_login import current_user
 from flask_restx import Resource, fields, marshal, reqparse
@@ -9,13 +11,14 @@ from controllers.console.datasets.error import DatasetNameDuplicateError
 from controllers.console.wraps import account_initialization_required, setup_required
 from fields.dataset_fields import dataset_detail_fields
 from libs.login import login_required
+from models.account import Account
 from services.dataset_service import DatasetService
 from services.external_knowledge_service import ExternalDatasetService
 from services.hit_testing_service import HitTestingService
 from services.knowledge_service import ExternalDatasetTestService


-def _validate_name(name):
+def _validate_name(name: str) -> str:
     if not name or len(name) < 1 or len(name) > 100:
         raise ValueError("Name must be between 1 to 100 characters.")
     return name
@@ -274,7 +277,7 @@ class ExternalKnowledgeHitTestingApi(Resource):
         response = HitTestingService.external_retrieve(
             dataset=dataset,
             query=args["query"],
-            account=current_user,
+            account=cast(Account, current_user),
             external_retrieval_model=args["external_retrieval_model"],
             metadata_filtering_conditions=args["metadata_filtering_conditions"],
         )
@@ -1,10 +1,11 @@
 import logging
+from typing import cast

 from flask_login import current_user
 from flask_restx import marshal, reqparse
 from werkzeug.exceptions import Forbidden, InternalServerError, NotFound

-import services.dataset_service
+import services
 from controllers.console.app.error import (
     CompletionRequestError,
     ProviderModelCurrentlyNotSupportError,
@@ -20,6 +21,7 @@ from core.errors.error import (
 )
 from core.model_runtime.errors.invoke import InvokeError
 from fields.hit_testing_fields import hit_testing_record_fields
+from models.account import Account
 from services.dataset_service import DatasetService
 from services.hit_testing_service import HitTestingService

@@ -59,7 +61,7 @@ class DatasetsHitTestingBase:
         response = HitTestingService.retrieve(
             dataset=dataset,
             query=args["query"],
-            account=current_user,
+            account=cast(Account, current_user),
             retrieval_model=args["retrieval_model"],
             external_retrieval_model=args["external_retrieval_model"],
             limit=10,
@@ -62,6 +62,7 @@ class DatasetMetadataApi(Resource):
         parser = reqparse.RequestParser()
         parser.add_argument("name", type=str, required=True, nullable=False, location="json")
         args = parser.parse_args()
+        name = args["name"]

         dataset_id_str = str(dataset_id)
         metadata_id_str = str(metadata_id)
@@ -70,7 +71,7 @@ class DatasetMetadataApi(Resource):
             raise NotFound("Dataset not found.")
         DatasetService.check_dataset_permission(dataset, current_user)

-        metadata = MetadataService.update_metadata_name(dataset_id_str, metadata_id_str, args.get("name"))
+        metadata = MetadataService.update_metadata_name(dataset_id_str, metadata_id_str, name)
         return metadata, 200

     @setup_required
@@ -20,13 +20,13 @@ from services.rag_pipeline.rag_pipeline import RagPipelineService
 logger = logging.getLogger(__name__)


-def _validate_name(name):
+def _validate_name(name: str) -> str:
     if not name or len(name) < 1 or len(name) > 40:
         raise ValueError("Name must be between 1 to 40 characters.")
     return name


-def _validate_description_length(description):
+def _validate_description_length(description: str) -> str:
     if len(description) > 400:
         raise ValueError("Description cannot exceed 400 characters.")
     return description
@@ -76,7 +76,7 @@ class CustomizedPipelineTemplateApi(Resource):
         )
         parser.add_argument(
             "description",
-            type=str,
+            type=_validate_description_length,
             nullable=True,
             required=False,
             default="",
@@ -133,7 +133,7 @@ class PublishCustomizedPipelineTemplateApi(Resource):
         )
         parser.add_argument(
             "description",
-            type=str,
+            type=_validate_description_length,
             nullable=True,
             required=False,
             default="",
@@ -1,5 +1,5 @@
-from flask_login import current_user  # type: ignore # type: ignore
-from flask_restx import Resource, marshal, reqparse  # type: ignore
+from flask_login import current_user
+from flask_restx import Resource, marshal, reqparse
 from sqlalchemy.orm import Session
 from werkzeug.exceptions import Forbidden

@@ -20,18 +20,6 @@ from services.entities.knowledge_entities.rag_pipeline_entities import IconInfo,
 from services.rag_pipeline.rag_pipeline_dsl_service import RagPipelineDslService


-def _validate_name(name):
-    if not name or len(name) < 1 or len(name) > 40:
-        raise ValueError("Name must be between 1 to 40 characters.")
-    return name
-
-
-def _validate_description_length(description):
-    if len(description) > 400:
-        raise ValueError("Description cannot exceed 400 characters.")
-    return description
-
-
 @console_ns.route("/rag/pipeline/dataset")
 class CreateRagPipelineDatasetApi(Resource):
     @setup_required
@@ -1,5 +1,5 @@
 import logging
-from typing import Any, NoReturn
+from typing import NoReturn

 from flask import Response
 from flask_restx import Resource, fields, inputs, marshal, marshal_with, reqparse
@@ -11,14 +11,12 @@ from controllers.console.app.error import (
     DraftWorkflowNotExist,
 )
 from controllers.console.app.workflow_draft_variable import (
-    _WORKFLOW_DRAFT_VARIABLE_FIELDS,
-    _WORKFLOW_DRAFT_VARIABLE_WITHOUT_VALUE_FIELDS,
+    _WORKFLOW_DRAFT_VARIABLE_FIELDS,  # type: ignore[private-usage]
+    _WORKFLOW_DRAFT_VARIABLE_WITHOUT_VALUE_FIELDS,  # type: ignore[private-usage]
 )
 from controllers.console.datasets.wraps import get_rag_pipeline
 from controllers.console.wraps import account_initialization_required, setup_required
 from controllers.web.error import InvalidArgumentError, NotFoundError
 from core.variables.segment_group import SegmentGroup
 from core.variables.segments import ArrayFileSegment, FileSegment, Segment
 from core.variables.types import SegmentType
 from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID
 from extensions.ext_database import db
@@ -34,32 +32,6 @@ from services.workflow_draft_variable_service import WorkflowDraftVariableList,
 logger = logging.getLogger(__name__)


-def _convert_values_to_json_serializable_object(value: Segment) -> Any:
-    if isinstance(value, FileSegment):
-        return value.value.model_dump()
-    elif isinstance(value, ArrayFileSegment):
-        return [i.model_dump() for i in value.value]
-    elif isinstance(value, SegmentGroup):
-        return [_convert_values_to_json_serializable_object(i) for i in value.value]
-    else:
-        return value.value
-
-
-def _serialize_var_value(variable: WorkflowDraftVariable) -> Any:
-    value = variable.get_value()
-    # create a copy of the value to avoid affecting the model cache.
-    value = value.model_copy(deep=True)
-    # Refresh the url signature before returning it to client.
-    if isinstance(value, FileSegment):
-        file = value.value
-        file.remote_url = file.generate_url()
-    elif isinstance(value, ArrayFileSegment):
-        files = value.value
-        for file in files:
-            file.remote_url = file.generate_url()
-    return _convert_values_to_json_serializable_object(value)
-
-
 def _create_pagination_parser():
     parser = reqparse.RequestParser()
     parser.add_argument(
@@ -104,7 +76,7 @@ def _api_prerequisite(f):
     @account_initialization_required
     @get_rag_pipeline
     def wrapper(*args, **kwargs):
-        if not isinstance(current_user, Account) or not current_user.is_editor:
+        if not isinstance(current_user, Account) or not current_user.has_edit_permission:
             raise Forbidden()
         return f(*args, **kwargs)
@@ -3,7 +3,7 @@ from flask_login import current_user
 from flask_restx import Resource, marshal_with, reqparse
 from werkzeug.exceptions import Forbidden

-from controllers.console import api
+from controllers.console import console_ns
 from controllers.console.wraps import account_initialization_required, setup_required
 from fields.tag_fields import dataset_tag_fields
 from libs.login import login_required
@@ -17,6 +17,7 @@ def _validate_name(name):
     return name


+@console_ns.route("/tags")
 class TagListApi(Resource):
     @setup_required
     @login_required
@@ -52,6 +53,7 @@ class TagListApi(Resource):
         return response, 200


+@console_ns.route("/tags/<uuid:tag_id>")
 class TagUpdateDeleteApi(Resource):
     @setup_required
     @login_required
@@ -89,6 +91,7 @@ class TagUpdateDeleteApi(Resource):
         return 204


+@console_ns.route("/tag-bindings/create")
 class TagBindingCreateApi(Resource):
     @setup_required
     @login_required
@@ -114,6 +117,7 @@ class TagBindingCreateApi(Resource):
         return {"result": "success"}, 200


+@console_ns.route("/tag-bindings/remove")
 class TagBindingDeleteApi(Resource):
     @setup_required
     @login_required
@@ -133,9 +137,3 @@ class TagBindingDeleteApi(Resource):
         TagService.delete_tag_binding(args)

         return {"result": "success"}, 200
-
-
-api.add_resource(TagListApi, "/tags")
-api.add_resource(TagUpdateDeleteApi, "/tags/<uuid:tag_id>")
-api.add_resource(TagBindingCreateApi, "/tag-bindings/create")
-api.add_resource(TagBindingDeleteApi, "/tag-bindings/remove")
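The tag controller now registers routes with `@console_ns.route(...)` decorators instead of trailing `api.add_resource(...)` calls. Both styles are standard flask_restx; a sketch of the equivalence, assuming `console_ns` is a `Namespace` already attached to the API object:

    from flask_restx import Namespace, Resource

    console_ns = Namespace("console")

    @console_ns.route("/tags")  # decorator style adopted by this diff
    class TagListApi(Resource):
        def get(self):
            return [], 200

    # The removed imperative equivalent:
    # api.add_resource(TagListApi, "/tags")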
@@ -17,6 +17,7 @@ from core.provider_manager import ProviderManager
 from fields.dataset_fields import dataset_detail_fields
 from fields.tag_fields import build_dataset_tag_fields
 from libs.login import current_user
+from libs.validators import validate_description_length
 from models.account import Account
 from models.dataset import Dataset, DatasetPermissionEnum
 from models.provider_ids import ModelProviderID
@@ -31,12 +32,6 @@ def _validate_name(name):
     return name


-def _validate_description_length(description):
-    if description and len(description) > 400:
-        raise ValueError("Description cannot exceed 400 characters.")
-    return description
-
-
 # Define parsers for dataset operations
 dataset_create_parser = reqparse.RequestParser()
 dataset_create_parser.add_argument(
@@ -48,7 +43,7 @@ dataset_create_parser.add_argument(
 )
 dataset_create_parser.add_argument(
     "description",
-    type=_validate_description_length,
+    type=validate_description_length,
     nullable=True,
     required=False,
     default="",
@@ -101,7 +96,7 @@ dataset_update_parser.add_argument(
     type=_validate_name,
 )
 dataset_update_parser.add_argument(
-    "description", location="json", store_missing=False, type=_validate_description_length
+    "description", location="json", store_missing=False, type=validate_description_length
 )
 dataset_update_parser.add_argument(
     "indexing_technique",
@@ -1,9 +1,11 @@
 import logging
 import queue
 import time
 from abc import abstractmethod
 from enum import IntEnum, auto
 from typing import Any

+from redis.exceptions import RedisError
 from sqlalchemy.orm import DeclarativeMeta

 from configs import dify_config
@@ -18,6 +20,8 @@ from core.app.entities.queue_entities import (
 )
 from extensions.ext_redis import redis_client

+logger = logging.getLogger(__name__)
+

 class PublishFrom(IntEnum):
     APPLICATION_MANAGER = auto()
@@ -35,9 +39,8 @@ class AppQueueManager:
         self.invoke_from = invoke_from  # Public accessor for invoke_from

         user_prefix = "account" if self._invoke_from in {InvokeFrom.EXPLORE, InvokeFrom.DEBUGGER} else "end-user"
-        redis_client.setex(
-            AppQueueManager._generate_task_belong_cache_key(self._task_id), 1800, f"{user_prefix}-{self._user_id}"
-        )
+        self._task_belong_cache_key = AppQueueManager._generate_task_belong_cache_key(self._task_id)
+        redis_client.setex(self._task_belong_cache_key, 1800, f"{user_prefix}-{self._user_id}")

         q: queue.Queue[WorkflowQueueMessage | MessageQueueMessage | None] = queue.Queue()

@@ -79,9 +82,21 @@ class AppQueueManager:
         Stop listen to queue
         :return:
         """
+        self._clear_task_belong_cache()
         self._q.put(None)

-    def publish_error(self, e, pub_from: PublishFrom):
+    def _clear_task_belong_cache(self) -> None:
+        """
+        Remove the task belong cache key once listening is finished.
+        """
+        try:
+            redis_client.delete(self._task_belong_cache_key)
+        except RedisError:
+            logger.exception(
+                "Failed to clear task belong cache for task %s (key: %s)", self._task_id, self._task_belong_cache_key
+            )
+
+    def publish_error(self, e, pub_from: PublishFrom) -> None:
         """
         Publish error
         :param e: error
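The queue-manager change keeps the Redis ownership key on the instance so `stop_listen()` can delete it eagerly instead of waiting out the 30-minute TTL. A lifecycle sketch; the key format here is hypothetical, and only the setex/delete pairing is taken from the diff:

    import logging

    from redis.exceptions import RedisError

    def run_task(redis_client, task_id: str, user_id: str) -> None:
        key = f"generate_task_belong:{task_id}"  # hypothetical key format
        redis_client.setex(key, 1800, f"account-{user_id}")  # claim with a 30-min TTL
        try:
            ...  # listen loop
        finally:
            try:
                redis_client.delete(key)  # release eagerly; the TTL remains a fallback
            except RedisError:
                logging.getLogger(__name__).exception("failed to clear %s", key)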
@@ -74,7 +74,7 @@ class TextPromptMessageContent(PromptMessageContent):
     Model class for text prompt message content.
     """

-    type: Literal[PromptMessageContentType.TEXT] = PromptMessageContentType.TEXT
+    type: Literal[PromptMessageContentType.TEXT] = PromptMessageContentType.TEXT  # type: ignore
     data: str


@@ -95,11 +95,11 @@ class MultiModalPromptMessageContent(PromptMessageContent):


 class VideoPromptMessageContent(MultiModalPromptMessageContent):
-    type: Literal[PromptMessageContentType.VIDEO] = PromptMessageContentType.VIDEO
+    type: Literal[PromptMessageContentType.VIDEO] = PromptMessageContentType.VIDEO  # type: ignore


 class AudioPromptMessageContent(MultiModalPromptMessageContent):
-    type: Literal[PromptMessageContentType.AUDIO] = PromptMessageContentType.AUDIO
+    type: Literal[PromptMessageContentType.AUDIO] = PromptMessageContentType.AUDIO  # type: ignore


 class ImagePromptMessageContent(MultiModalPromptMessageContent):
@@ -111,12 +111,12 @@ class ImagePromptMessageContent(MultiModalPromptMessageContent):
         LOW = auto()
         HIGH = auto()

-    type: Literal[PromptMessageContentType.IMAGE] = PromptMessageContentType.IMAGE
+    type: Literal[PromptMessageContentType.IMAGE] = PromptMessageContentType.IMAGE  # type: ignore
     detail: DETAIL = DETAIL.LOW


 class DocumentPromptMessageContent(MultiModalPromptMessageContent):
-    type: Literal[PromptMessageContentType.DOCUMENT] = PromptMessageContentType.DOCUMENT
+    type: Literal[PromptMessageContentType.DOCUMENT] = PromptMessageContentType.DOCUMENT  # type: ignore


 PromptMessageContentUnionTypes = Annotated[
@@ -15,7 +15,7 @@ class GPT2Tokenizer:
         use gpt2 tokenizer to get num tokens
         """
         _tokenizer = GPT2Tokenizer.get_encoder()
-        tokens = _tokenizer.encode(text)
+        tokens = _tokenizer.encode(text)  # type: ignore
         return len(tokens)

     @staticmethod
@@ -196,15 +196,15 @@ def jsonable_encoder(
         return encoder(obj)

     try:
-        data = dict(obj)
+        data = dict(obj)  # type: ignore
     except Exception as e:
         errors: list[Exception] = []
         errors.append(e)
         try:
-            data = vars(obj)
+            data = vars(obj)  # type: ignore
         except Exception as e:
             errors.append(e)
-            raise ValueError(errors) from e
+            raise ValueError(str(errors)) from e
     return jsonable_encoder(
         data,
         by_alias=by_alias,
@@ -3,7 +3,8 @@ from dataclasses import dataclass
 from typing import Any

 from opentelemetry import trace as trace_api
-from opentelemetry.sdk.trace import Event, Status, StatusCode
+from opentelemetry.sdk.trace import Event
+from opentelemetry.trace import Status, StatusCode
 from pydantic import BaseModel, Field


@@ -155,7 +155,10 @@ class OpsTraceManager:
             if key in tracing_config:
                 if "*" in tracing_config[key]:
                     # If the key contains '*', retain the original value from the current config
-                    new_config[key] = current_trace_config.get(key, tracing_config[key])
+                    if current_trace_config:
+                        new_config[key] = current_trace_config.get(key, tracing_config[key])
+                    else:
+                        new_config[key] = tracing_config[key]
                 else:
                     # Otherwise, encrypt the key
                     new_config[key] = encrypt_token(tenant_id, tracing_config[key])
@@ -62,7 +62,8 @@ class WeaveDataTrace(BaseTraceInstance):
         self,
     ):
         try:
-            project_url = f"https://wandb.ai/{self.weave_client._project_id()}"
+            project_identifier = f"{self.entity}/{self.project_name}" if self.entity else self.project_name
+            project_url = f"https://wandb.ai/{project_identifier}"
             return project_url
         except Exception as e:
             logger.debug("Weave get run url failed: %s", str(e))
@@ -424,7 +425,23 @@ class WeaveDataTrace(BaseTraceInstance):
             raise ValueError(f"Weave API check failed: {str(e)}")

     def start_call(self, run_data: WeaveTraceModel, parent_run_id: str | None = None):
-        call = self.weave_client.create_call(op=run_data.op, inputs=run_data.inputs, attributes=run_data.attributes)
+        inputs = run_data.inputs
+        if inputs is None:
+            inputs = {}
+        elif not isinstance(inputs, dict):
+            inputs = {"inputs": str(inputs)}
+
+        attributes = run_data.attributes
+        if attributes is None:
+            attributes = {}
+        elif not isinstance(attributes, dict):
+            attributes = {"attributes": str(attributes)}
+
+        call = self.weave_client.create_call(
+            op=run_data.op,
+            inputs=inputs,
+            attributes=attributes,
+        )
         self.calls[run_data.id] = call
         if parent_run_id:
             self.calls[run_data.id].parent_id = parent_run_id
@@ -432,6 +449,7 @@ class WeaveDataTrace(BaseTraceInstance):
     def finish_call(self, run_data: WeaveTraceModel):
         call = self.calls.get(run_data.id)
         if call:
-            self.weave_client.finish_call(call=call, output=run_data.outputs, exception=run_data.exception)
+            exception = Exception(run_data.exception) if run_data.exception else None
+            self.weave_client.finish_call(call=call, output=run_data.outputs, exception=exception)
         else:
             raise ValueError(f"Call with id {run_data.id} not found")
@@ -106,7 +106,9 @@ class RetrievalService:
         if exceptions:
             raise ValueError(";\n".join(exceptions))

+        # Deduplicate documents for hybrid search to avoid duplicate chunks
         if retrieval_method == RetrievalMethod.HYBRID_SEARCH.value:
+            all_documents = cls._deduplicate_documents(all_documents)
             data_post_processor = DataPostProcessor(
                 str(dataset.tenant_id), reranking_mode, reranking_model, weights, False
             )
@@ -143,6 +145,40 @@ class RetrievalService:
         )
         return all_documents

+    @classmethod
+    def _deduplicate_documents(cls, documents: list[Document]) -> list[Document]:
+        """Deduplicate documents based on doc_id to avoid duplicate chunks in hybrid search."""
+        if not documents:
+            return documents
+
+        unique_documents = []
+        seen_doc_ids = set()
+
+        for document in documents:
+            # For dify provider documents, use doc_id for deduplication
+            if document.provider == "dify" and document.metadata is not None and "doc_id" in document.metadata:
+                doc_id = document.metadata["doc_id"]
+                if doc_id not in seen_doc_ids:
+                    seen_doc_ids.add(doc_id)
+                    unique_documents.append(document)
+                # If duplicate, keep the one with higher score
+                elif "score" in document.metadata:
+                    # Find existing document with same doc_id and compare scores
+                    for i, existing_doc in enumerate(unique_documents):
+                        if (
+                            existing_doc.metadata
+                            and existing_doc.metadata.get("doc_id") == doc_id
+                            and existing_doc.metadata.get("score", 0) < document.metadata.get("score", 0)
+                        ):
+                            unique_documents[i] = document
+                            break
+            else:
+                # For non-dify documents, use content-based deduplication
+                if document not in unique_documents:
+                    unique_documents.append(document)
+
+        return unique_documents
+
     @classmethod
     def _get_dataset(cls, dataset_id: str) -> Dataset | None:
         with Session(db.engine) as session:
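The new `_deduplicate_documents` walks the result list and keeps, per `doc_id`, the candidate with the higher score. A compact equivalent of that behavior, sketched with plain dicts in place of the `Document` model:

    def dedup_keep_best(docs: list[dict]) -> list[dict]:
        best: dict[str, dict] = {}  # doc_id -> highest-scored document seen so far
        order: list[str] = []       # preserve first-seen ordering
        for doc in docs:
            doc_id = doc["metadata"]["doc_id"]
            if doc_id not in best:
                best[doc_id] = doc
                order.append(doc_id)
            elif doc["metadata"].get("score", 0) > best[doc_id]["metadata"].get("score", 0):
                best[doc_id] = doc
        return [best[doc_id] for doc_id in order]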
@@ -18,6 +18,10 @@ class DatasetRetrieverBaseTool(BaseModel, ABC):
     retriever_from: str
     model_config = ConfigDict(arbitrary_types_allowed=True)

+    def run(self, query: str) -> str:
+        """Use the tool."""
+        return self._run(query)
+
     @abstractmethod
     def _run(self, query: str) -> str:
         """Use the tool.
@@ -124,7 +124,7 @@ class DatasetRetrieverTool(Tool):
             yield self.create_text_message(text="please input query")
         else:
             # invoke dataset retriever tool
-            result = self.retrieval_tool._run(query=query)
+            result = self.retrieval_tool.run(query=query)
             yield self.create_text_message(text=result)

     def validate_credentials(
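The retriever change adds a public `run()` that forwards to the abstract `_run()`, so `DatasetRetrieverTool` no longer reaches into a protected member of another object. The pattern in isolation, as a minimal sketch:

    from abc import ABC, abstractmethod

    class RetrieverBase(ABC):
        def run(self, query: str) -> str:
            """Public entry point; keeps _run() an implementation detail."""
            return self._run(query)

        @abstractmethod
        def _run(self, query: str) -> str: ...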
@@ -2,6 +2,7 @@ import re
 from json import dumps as json_dumps
 from json import loads as json_loads
 from json.decoder import JSONDecodeError
+from typing import Any

 from flask import request
 from requests import get
@@ -127,34 +128,34 @@ class ApiBasedToolSchemaParser:
                 if "allOf" in prop_dict:
                     del prop_dict["allOf"]

-            # parse body parameters
-            if "schema" in interface["operation"]["requestBody"]["content"][content_type]:
-                body_schema = interface["operation"]["requestBody"]["content"][content_type]["schema"]
-                required = body_schema.get("required", [])
-                properties = body_schema.get("properties", {})
-                for name, property in properties.items():
-                    tool = ToolParameter(
-                        name=name,
-                        label=I18nObject(en_US=name, zh_Hans=name),
-                        human_description=I18nObject(
-                            en_US=property.get("description", ""), zh_Hans=property.get("description", "")
-                        ),
-                        type=ToolParameter.ToolParameterType.STRING,
-                        required=name in required,
-                        form=ToolParameter.ToolParameterForm.LLM,
-                        llm_description=property.get("description", ""),
-                        default=property.get("default", None),
-                        placeholder=I18nObject(
-                            en_US=property.get("description", ""), zh_Hans=property.get("description", "")
-                        ),
-                    )
+        # parse body parameters
+        if "schema" in interface["operation"]["requestBody"]["content"][content_type]:
+            body_schema = interface["operation"]["requestBody"]["content"][content_type]["schema"]
+            required = body_schema.get("required", [])
+            properties = body_schema.get("properties", {})
+            for name, property in properties.items():
+                tool = ToolParameter(
+                    name=name,
+                    label=I18nObject(en_US=name, zh_Hans=name),
+                    human_description=I18nObject(
+                        en_US=property.get("description", ""), zh_Hans=property.get("description", "")
+                    ),
+                    type=ToolParameter.ToolParameterType.STRING,
+                    required=name in required,
+                    form=ToolParameter.ToolParameterForm.LLM,
+                    llm_description=property.get("description", ""),
+                    default=property.get("default", None),
+                    placeholder=I18nObject(
+                        en_US=property.get("description", ""), zh_Hans=property.get("description", "")
+                    ),
+                )

-                    # check if there is a type
-                    typ = ApiBasedToolSchemaParser._get_tool_parameter_type(property)
-                    if typ:
-                        tool.type = typ
+                # check if there is a type
+                typ = ApiBasedToolSchemaParser._get_tool_parameter_type(property)
+                if typ:
+                    tool.type = typ

-                    parameters.append(tool)
+                parameters.append(tool)

         # check if parameters is duplicated
         parameters_count = {}
@@ -241,7 +242,9 @@ class ApiBasedToolSchemaParser:
         return ApiBasedToolSchemaParser.parse_openapi_to_tool_bundle(openapi, extra_info=extra_info, warning=warning)

     @staticmethod
-    def parse_swagger_to_openapi(swagger: dict, extra_info: dict | None = None, warning: dict | None = None):
+    def parse_swagger_to_openapi(
+        swagger: dict, extra_info: dict | None = None, warning: dict | None = None
+    ) -> dict[str, Any]:
+        warning = warning or {}
         """
         parse swagger to openapi
@@ -257,7 +260,7 @@ class ApiBasedToolSchemaParser:
         if len(servers) == 0:
             raise ToolApiSchemaError("No server found in the swagger yaml.")

-        openapi = {
+        converted_openapi: dict[str, Any] = {
             "openapi": "3.0.0",
             "info": {
                 "title": info.get("title", "Swagger"),
@@ -275,7 +278,7 @@ class ApiBasedToolSchemaParser:

         # convert paths
         for path, path_item in swagger["paths"].items():
-            openapi["paths"][path] = {}
+            converted_openapi["paths"][path] = {}
             for method, operation in path_item.items():
                 if "operationId" not in operation:
                     raise ToolApiSchemaError(f"No operationId found in operation {method} {path}.")
@@ -286,7 +289,7 @@ class ApiBasedToolSchemaParser:
                 if warning is not None:
                     warning["missing_summary"] = f"No summary or description found in operation {method} {path}."

-                openapi["paths"][path][method] = {
+                converted_openapi["paths"][path][method] = {
                     "operationId": operation["operationId"],
                     "summary": operation.get("summary", ""),
                     "description": operation.get("description", ""),
@@ -295,13 +298,14 @@ class ApiBasedToolSchemaParser:
                 }

                 if "requestBody" in operation:
-                    openapi["paths"][path][method]["requestBody"] = operation["requestBody"]
+                    converted_openapi["paths"][path][method]["requestBody"] = operation["requestBody"]

         # convert definitions
-        for name, definition in swagger["definitions"].items():
-            openapi["components"]["schemas"][name] = definition
+        if "definitions" in swagger:
+            for name, definition in swagger["definitions"].items():
+                converted_openapi["components"]["schemas"][name] = definition

-        return openapi
+        return converted_openapi

     @staticmethod
     def parse_openai_plugin_json_to_tool_bundle(
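The swagger conversion is hardened in two ways above: a declared `dict[str, Any]` return type and a guard around the optional `definitions` section. The guard in isolation, as a sketch; Swagger 2.0 makes `definitions` optional, so the old unconditional loop raised KeyError on documents without it:

    from typing import Any

    def copy_definitions(swagger: dict[str, Any], openapi: dict[str, Any]) -> None:
        # Only copy schemas when the optional "definitions" section is present.
        if "definitions" in swagger:
            for name, definition in swagger["definitions"].items():
                openapi.setdefault("components", {}).setdefault("schemas", {})[name] = definition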
@@ -20,6 +20,7 @@ class ModelInvokeCompletedEvent(NodeEventBase):
     usage: LLMUsage
     finish_reason: str | None = None
     reasoning_content: str | None = None
+    structured_output: dict | None = None


 class RunRetryEvent(NodeEventBase):
@@ -87,7 +87,7 @@ class Executor:
                 node_data.authorization.config.api_key
             ).text

-        self.url: str = node_data.url
+        self.url = node_data.url
         self.method = node_data.method
         self.auth = node_data.authorization
         self.timeout = timeout
@@ -349,11 +349,10 @@ class Executor:
             "timeout": (self.timeout.connect, self.timeout.read, self.timeout.write),
             "ssl_verify": self.ssl_verify,
             "follow_redirects": True,
-            "max_retries": self.max_retries,
         }
         # request_args = {k: v for k, v in request_args.items() if v is not None}
         try:
-            response: httpx.Response = _METHOD_MAP[method_lc](**request_args)
+            response: httpx.Response = _METHOD_MAP[method_lc](**request_args, max_retries=self.max_retries)
         except (ssrf_proxy.MaxRetriesExceededError, httpx.RequestError) as e:
             raise HttpRequestNodeError(str(e)) from e
         # FIXME: fix type ignore, this maybe httpx type issue
@@ -165,6 +165,8 @@ class HttpRequestNode(Node):
         body_type = typed_node_data.body.type
         data = typed_node_data.body.data
         match body_type:
+            case "none":
+                pass
             case "binary":
                 if len(data) != 1:
                     raise RequestBodyError("invalid body data, should have only one item")
@@ -83,7 +83,7 @@ class IfElseNode(Node):
         else:
             # TODO: Update database then remove this
             # Fallback to old structure if cases are not defined
-            input_conditions, group_result, final_result = _should_not_use_old_function(  # ty: ignore [deprecated]
+            input_conditions, group_result, final_result = _should_not_use_old_function(  # pyright: ignore [reportDeprecated]
                 condition_processor=condition_processor,
                 variable_pool=self.graph_runtime_state.variable_pool,
                 conditions=self._node_data.conditions or [],
@@ -136,6 +136,11 @@ class KnowledgeIndexNode(Node):
         document = db.session.query(Document).filter_by(id=document_id.value).first()
         if not document:
             raise KnowledgeIndexNodeError(f"Document {document_id.value} not found.")
+        doc_id_value = document.id
+        ds_id_value = dataset.id
+        dataset_name_value = dataset.name
+        document_name_value = document.name
+        created_at_value = document.created_at
         # chunk nodes by chunk size
         indexing_start_at = time.perf_counter()
         index_processor = IndexProcessorFactory(dataset.chunk_structure).init_index_processor()
@@ -161,16 +166,16 @@ class KnowledgeIndexNode(Node):
         document.word_count = (
             db.session.query(func.sum(DocumentSegment.word_count))
             .where(
-                DocumentSegment.document_id == document.id,
-                DocumentSegment.dataset_id == dataset.id,
+                DocumentSegment.document_id == doc_id_value,
+                DocumentSegment.dataset_id == ds_id_value,
             )
             .scalar()
         )
         db.session.add(document)
         # update document segment status
         db.session.query(DocumentSegment).where(
-            DocumentSegment.document_id == document.id,
-            DocumentSegment.dataset_id == dataset.id,
+            DocumentSegment.document_id == doc_id_value,
+            DocumentSegment.dataset_id == ds_id_value,
         ).update(
             {
                 DocumentSegment.status: "completed",
@@ -182,13 +187,13 @@ class KnowledgeIndexNode(Node):
         db.session.commit()

         return {
-            "dataset_id": dataset.id,
-            "dataset_name": dataset.name,
+            "dataset_id": ds_id_value,
+            "dataset_name": dataset_name_value,
             "batch": batch.value,
-            "document_id": document.id,
-            "document_name": document.name,
-            "created_at": document.created_at.timestamp(),
-            "display_status": document.indexing_status,
+            "document_id": doc_id_value,
+            "document_name": document_name_value,
+            "created_at": created_at_value.timestamp(),
+            "display_status": "completed",
         }

     def _get_preview_output(self, chunk_structure: str, chunks: Any) -> Mapping[str, Any]:
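The knowledge-index hunks copy plain values (`doc_id_value`, `created_at_value`, and so on) out of the ORM objects before the session commits. That matters because SQLAlchemy expires instance attributes on commit by default, so touching `document.id` afterwards can trigger a fresh SELECT or fail on a detached instance. A sketch of the idea:

    def finalize(db_session, document) -> dict:
        # expire_on_commit=True is the session default: after commit(), attribute
        # access reloads from the database, or raises on a detached instance.
        doc_id_value = document.id              # copy plain values first
        created_at_value = document.created_at
        db_session.commit()
        return {"document_id": doc_id_value, "created_at": created_at_value.timestamp()}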
@@ -107,7 +107,7 @@ class KnowledgeRetrievalNode(Node):
             graph_runtime_state=graph_runtime_state,
         )
         # LLM file outputs, used for MultiModal outputs.
-        self._file_outputs: list[File] = []
+        self._file_outputs = []

         if llm_file_saver is None:
             llm_file_saver = FileSaverImpl(
@@ -161,6 +161,8 @@ class ListOperatorNode(Node):
         elif isinstance(variable, ArrayFileSegment):
             if isinstance(condition.value, str):
                 value = self.graph_runtime_state.variable_pool.convert_template(condition.value).text
+            elif isinstance(condition.value, bool):
+                raise ValueError(f"File filter expects a string value, got {type(condition.value)}")
             else:
                 value = condition.value
             filter_func = _get_file_filter_func(
@@ -46,7 +46,7 @@ class LLMFileSaver(tp.Protocol):
         dot (`.`). For example, `.py` and `.tar.gz` are both valid values, while `py`
         and `tar.gz` are not.
         """
-        pass
+        raise NotImplementedError()

     def save_remote_url(self, url: str, file_type: FileType) -> File:
         """save_remote_url saves the file from a remote url returned by LLM.
@@ -56,7 +56,7 @@ class LLMFileSaver(tp.Protocol):
         :param url: the url of the file.
         :param file_type: the file type of the file, check `FileType` enum for reference.
         """
-        pass
+        raise NotImplementedError()


 EngineFactory: tp.TypeAlias = tp.Callable[[], Engine]
@@ -23,6 +23,7 @@ from core.model_runtime.entities.llm_entities import (
     LLMResult,
     LLMResultChunk,
     LLMResultChunkWithStructuredOutput,
     LLMResultWithStructuredOutput,
+    LLMStructuredOutput,
     LLMUsage,
 )
@@ -127,7 +128,7 @@ class LLMNode(Node):
             graph_runtime_state=graph_runtime_state,
         )
         # LLM file outputs, used for MultiModal outputs.
-        self._file_outputs: list[File] = []
+        self._file_outputs = []

         if llm_file_saver is None:
             llm_file_saver = FileSaverImpl(
@@ -165,6 +166,7 @@ class LLMNode(Node):
         node_inputs: dict[str, Any] = {}
         process_data: dict[str, Any] = {}
         result_text = ""
+        clean_text = ""
         usage = LLMUsage.empty_usage()
         finish_reason = None
         reasoning_content = None
@@ -278,6 +280,13 @@ class LLMNode(Node):
                 # Extract clean text from <think> tags
                 clean_text, _ = LLMNode._split_reasoning(result_text, self._node_data.reasoning_format)

+                # Process structured output if available from the event.
+                structured_output = (
+                    LLMStructuredOutput(structured_output=event.structured_output)
+                    if event.structured_output
+                    else None
+                )
+
                 # deduct quota
                 llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
                 break
@@ -1048,7 +1057,7 @@ class LLMNode(Node):
     @staticmethod
     def handle_blocking_result(
         *,
-        invoke_result: LLMResult,
+        invoke_result: LLMResult | LLMResultWithStructuredOutput,
         saver: LLMFileSaver,
         file_outputs: list["File"],
         reasoning_format: Literal["separated", "tagged"] = "tagged",
@@ -1079,6 +1088,8 @@ class LLMNode(Node):
             finish_reason=None,
             # Reasoning content for workflow variables and downstream nodes
             reasoning_content=reasoning_content,
+            # Pass structured output if enabled
+            structured_output=getattr(invoke_result, "structured_output", None),
        )

     @staticmethod
@@ -179,6 +179,6 @@ CHAT_EXAMPLE = [
                 "required": ["food"],
             },
         },
-        "assistant": {"text": "I need to output a valid JSON object.", "json": {"result": "apple pie"}},
+        "assistant": {"text": "I need to output a valid JSON object.", "json": {"food": "apple pie"}},
     },
 ]
@@ -68,7 +68,7 @@ class QuestionClassifierNode(Node):
             graph_runtime_state=graph_runtime_state,
         )
         # LLM file outputs, used for MultiModal outputs.
-        self._file_outputs: list[File] = []
+        self._file_outputs = []

         if llm_file_saver is None:
             llm_file_saver = FileSaverImpl(
@@ -111,9 +111,9 @@ class QuestionClassifierNode(Node):
         query = variable.value if variable else None
         variables = {"query": query}
         # fetch model config
-        model_instance, model_config = LLMNode._fetch_model_config(
-            node_data_model=node_data.model,
+        model_instance, model_config = llm_utils.fetch_model_config(
+            tenant_id=self.tenant_id,
+            node_data_model=node_data.model,
         )
         # fetch memory
         memory = llm_utils.fetch_memory(
@@ -416,4 +416,8 @@ class WorkflowEntry:

             # append variable and value to variable pool
             if variable_node_id != ENVIRONMENT_VARIABLE_NODE_ID:
+                # In single run, the input_value is set as the LLM's structured output value within the variable_pool.
+                if len(variable_key_list) == 2 and variable_key_list[0] == "structured_output":
+                    input_value = {variable_key_list[1]: input_value}
+                    variable_key_list = variable_key_list[0:1]
                 variable_pool.add([variable_node_id] + variable_key_list, input_value)
@@ -10,14 +10,14 @@ from dify_app import DifyApp

 def init_app(app: DifyApp):
     @app.after_request
-    def after_request(response):
+    def after_request(response):  # pyright: ignore[reportUnusedFunction]
         """Add Version headers to the response."""
         response.headers.add("X-Version", dify_config.project.version)
         response.headers.add("X-Env", dify_config.DEPLOY_ENV)
         return response

     @app.route("/health")
-    def health():
+    def health():  # pyright: ignore[reportUnusedFunction]
         return Response(
             json.dumps({"pid": os.getpid(), "status": "ok", "version": dify_config.project.version}),
             status=200,
@@ -25,7 +25,7 @@ def init_app(app: DifyApp):
         )

     @app.route("/threads")
-    def threads():
+    def threads():  # pyright: ignore[reportUnusedFunction]
         num_threads = threading.active_count()
         threads = threading.enumerate()

@@ -50,7 +50,7 @@ def init_app(app: DifyApp):
         }

     @app.route("/db-pool-stat")
-    def pool_stat():
+    def pool_stat():  # pyright: ignore[reportUnusedFunction]
         from extensions.ext_database import db

         engine = db.engine
@@ -10,7 +10,7 @@ from models.engine import db
 logger = logging.getLogger(__name__)

 # Global flag to avoid duplicate registration of event listener
-_GEVENT_COMPATIBILITY_SETUP: bool = False
+_gevent_compatibility_setup: bool = False


 def _safe_rollback(connection):
@@ -26,14 +26,14 @@ def _safe_rollback(connection):


 def _setup_gevent_compatibility():
-    global _GEVENT_COMPATIBILITY_SETUP  # pylint: disable=global-statement
+    global _gevent_compatibility_setup  # pylint: disable=global-statement

     # Avoid duplicate registration
-    if _GEVENT_COMPATIBILITY_SETUP:
+    if _gevent_compatibility_setup:
         return

     @event.listens_for(Pool, "reset")
-    def _safe_reset(dbapi_connection, connection_record, reset_state):  # pylint: disable=unused-argument
+    def _safe_reset(dbapi_connection, connection_record, reset_state):  # pyright: ignore[reportUnusedFunction]
         if reset_state.terminate_only:
             return

@@ -47,7 +47,7 @@ def _setup_gevent_compatibility():
     except (AttributeError, ImportError):
         _safe_rollback(dbapi_connection)

-    _GEVENT_COMPATIBILITY_SETUP = True
+    _gevent_compatibility_setup = True


 def init_app(app: DifyApp):
@@ -2,4 +2,4 @@ from dify_app import DifyApp
|
||||
|
||||
|
||||
def init_app(app: DifyApp):
|
||||
from events import event_handlers # noqa: F401
|
||||
from events import event_handlers # noqa: F401 # pyright: ignore[reportUnusedImport]
|
||||
|
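The rename from `_GEVENT_COMPATIBILITY_SETUP` to `_gevent_compatibility_setup` reflects that the flag is mutated at runtime, so lowercase (a variable, not a constant) is the conventional spelling. A reduced sketch of the guarded listener registration, with the `_safe_rollback` body simplified to a bare rollback:

```python
from sqlalchemy import event
from sqlalchemy.pool import Pool

_gevent_compatibility_setup: bool = False  # mutated below, hence not UPPER_CASE

def _setup_gevent_compatibility():
    global _gevent_compatibility_setup
    if _gevent_compatibility_setup:  # avoid registering the listener twice
        return

    @event.listens_for(Pool, "reset")
    def _safe_reset(dbapi_connection, connection_record, reset_state):  # pyright: ignore[reportUnusedFunction]
        if reset_state.terminate_only:
            return
        # Simplified stand-in for the real _safe_rollback helper.
        try:
            dbapi_connection.rollback()
        except Exception:
            pass

    _gevent_compatibility_setup = True
```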
@@ -33,7 +33,9 @@ class AliyunOssStorage(BaseStorage):

def load_once(self, filename: str) -> bytes:
obj = self.client.get_object(self.__wrapper_folder_filename(filename))
data: bytes = obj.read()
data = obj.read()
if not isinstance(data, bytes):
return b""
return data

def load_stream(self, filename: str) -> Generator:

@@ -39,10 +39,10 @@ class AwsS3Storage(BaseStorage):
self.client.head_bucket(Bucket=self.bucket_name)
except ClientError as e:
# if bucket not exists, create it
if e.response["Error"]["Code"] == "404":
if e.response.get("Error", {}).get("Code") == "404":
self.client.create_bucket(Bucket=self.bucket_name)
# if bucket is not accessible, pass, maybe the bucket is existing but not accessible
elif e.response["Error"]["Code"] == "403":
elif e.response.get("Error", {}).get("Code") == "403":
pass
else:
# other error, raise exception
@@ -55,7 +55,7 @@ class AwsS3Storage(BaseStorage):
try:
data: bytes = self.client.get_object(Bucket=self.bucket_name, Key=filename)["Body"].read()
except ClientError as ex:
if ex.response["Error"]["Code"] == "NoSuchKey":
if ex.response.get("Error", {}).get("Code") == "NoSuchKey":
raise FileNotFoundError("File not found")
else:
raise
@@ -66,7 +66,7 @@ class AwsS3Storage(BaseStorage):
response = self.client.get_object(Bucket=self.bucket_name, Key=filename)
yield from response["Body"].iter_chunks()
except ClientError as ex:
if ex.response["Error"]["Code"] == "NoSuchKey":
if ex.response.get("Error", {}).get("Code") == "NoSuchKey":
raise FileNotFoundError("file not found")
elif "reached max retries" in str(ex):
raise ValueError("please do not request the same file too frequently")
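The repeated change from `ex.response["Error"]["Code"]` to `ex.response.get("Error", {}).get("Code")` matters because botocore does not guarantee the `Error` mapping is present on every `ClientError`; chained `.get()` calls degrade to `None` instead of raising a `KeyError` inside an exception handler. A small sketch of the difference:

```python
from botocore.exceptions import ClientError

def error_code(ex: ClientError) -> str | None:
    # A missing "Error" or "Code" key yields None instead of a KeyError
    # raised while we are already handling another exception.
    return ex.response.get("Error", {}).get("Code")

err = ClientError({"Error": {"Code": "NoSuchKey", "Message": "gone"}}, "GetObject")
assert error_code(err) == "NoSuchKey"
assert error_code(ClientError({}, "GetObject")) is None  # would KeyError with [] access
```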
@@ -27,24 +27,38 @@ class AzureBlobStorage(BaseStorage):
self.credential = None

def save(self, filename, data):
if not self.bucket_name:
return

client = self._sync_client()
blob_container = client.get_container_client(container=self.bucket_name)
blob_container.upload_blob(filename, data)

def load_once(self, filename: str) -> bytes:
if not self.bucket_name:
raise FileNotFoundError("Azure bucket name is not configured.")

client = self._sync_client()
blob = client.get_container_client(container=self.bucket_name)
blob = blob.get_blob_client(blob=filename)
data: bytes = blob.download_blob().readall()
data = blob.download_blob().readall()
if not isinstance(data, bytes):
raise TypeError(f"Expected bytes from blob.readall(), got {type(data).__name__}")
return data

def load_stream(self, filename: str) -> Generator:
if not self.bucket_name:
raise FileNotFoundError("Azure bucket name is not configured.")

client = self._sync_client()
blob = client.get_blob_client(container=self.bucket_name, blob=filename)
blob_data = blob.download_blob()
yield from blob_data.chunks()

def download(self, filename, target_filepath):
if not self.bucket_name:
return

client = self._sync_client()

blob = client.get_blob_client(container=self.bucket_name, blob=filename)
@@ -53,12 +67,18 @@ class AzureBlobStorage(BaseStorage):
blob_data.readinto(my_blob)

def exists(self, filename):
if not self.bucket_name:
return False

client = self._sync_client()

blob = client.get_blob_client(container=self.bucket_name, blob=filename)
return blob.exists()

def delete(self, filename):
if not self.bucket_name:
return

client = self._sync_client()

blob_container = client.get_container_client(container=self.bucket_name)
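Every Azure method now guards on `self.bucket_name` first, with the failure mode matched to the call site: read paths raise `FileNotFoundError`, while write and delete paths return silently. A condensed sketch of that convention (class and names illustrative):

```python
class GuardedStorage:
    """Sketch of the guard convention above, not the real Azure backend."""

    def __init__(self, bucket_name: str | None):
        self.bucket_name = bucket_name

    def load_once(self, filename: str) -> bytes:
        if not self.bucket_name:
            # Readers raise: callers expect data or an explicit miss.
            raise FileNotFoundError("bucket name is not configured")
        return b"..."  # placeholder for the actual fetch

    def delete(self, filename: str) -> None:
        if not self.bucket_name:
            # Writers no-op: nothing to delete if storage was never configured.
            return
        print(f"deleting {filename}")  # placeholder for the actual delete
```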
@@ -430,7 +430,7 @@ class ClickZettaVolumeStorage(BaseStorage):

rows = self._execute_sql(sql, fetch=True)

exists = len(rows) > 0
exists = len(rows) > 0 if rows else False
logger.debug("File %s exists check: %s", filename, exists)
return exists
except Exception as e:
@@ -509,16 +509,17 @@ class ClickZettaVolumeStorage(BaseStorage):
rows = self._execute_sql(sql, fetch=True)

result = []
for row in rows:
file_path = row[0]  # relative_path column
if rows:
for row in rows:
file_path = row[0]  # relative_path column

# For User Volume, remove dify prefix from results
dify_prefix_with_slash = f"{self._config.dify_prefix}/"
if volume_prefix == "USER VOLUME" and file_path.startswith(dify_prefix_with_slash):
file_path = file_path[len(dify_prefix_with_slash) :]  # Remove prefix
# For User Volume, remove dify prefix from results
dify_prefix_with_slash = f"{self._config.dify_prefix}/"
if volume_prefix == "USER VOLUME" and file_path.startswith(dify_prefix_with_slash):
file_path = file_path[len(dify_prefix_with_slash) :]  # Remove prefix

if files and not file_path.endswith("/") or directories and file_path.endswith("/"):
result.append(file_path)
if files and not file_path.endswith("/") or directories and file_path.endswith("/"):
result.append(file_path)

logger.debug("Scanned %d items in path %s", len(result), path)
return result

@@ -439,6 +439,11 @@ class VolumePermissionManager:
self._permission_cache.clear()
logger.debug("Permission cache cleared")

@property
def volume_type(self) -> str | None:
"""Get the volume type."""
return self._volume_type

def get_permission_summary(self, dataset_id: str | None = None) -> dict[str, bool]:
"""Get permission summary

@@ -632,13 +637,13 @@ def check_volume_permission(permission_manager: VolumePermissionManager, operati
VolumePermissionError: If no permission
"""
if not permission_manager.validate_operation(operation, dataset_id):
error_message = f"Permission denied for operation '{operation}' on {permission_manager._volume_type} volume"
error_message = f"Permission denied for operation '{operation}' on {permission_manager.volume_type} volume"
if dataset_id:
error_message += f" (dataset: {dataset_id})"

raise VolumePermissionError(
error_message,
operation=operation,
volume_type=permission_manager._volume_type or "unknown",
volume_type=permission_manager.volume_type or "unknown",
dataset_id=dataset_id,
)
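Exposing `_volume_type` through a read-only `volume_type` property lets `check_volume_permission` stop reaching into a private attribute while keeping the field immutable from outside. A minimal sketch:

```python
class VolumeManager:
    """Sketch: a read-only property over a private attribute."""

    def __init__(self, volume_type: str | None):
        self._volume_type = volume_type

    @property
    def volume_type(self) -> str | None:
        """Read-only view; there is no setter, so callers cannot mutate it."""
        return self._volume_type

manager = VolumeManager("USER VOLUME")
print(f"on {manager.volume_type or 'unknown'} volume")  # no private access
```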
@@ -35,12 +35,16 @@ class GoogleCloudStorage(BaseStorage):
def load_once(self, filename: str) -> bytes:
bucket = self.client.get_bucket(self.bucket_name)
blob = bucket.get_blob(filename)
if blob is None:
raise FileNotFoundError("File not found")
data: bytes = blob.download_as_bytes()
return data

def load_stream(self, filename: str) -> Generator:
bucket = self.client.get_bucket(self.bucket_name)
blob = bucket.get_blob(filename)
if blob is None:
raise FileNotFoundError("File not found")
with blob.open(mode="rb") as blob_stream:
while chunk := blob_stream.read(4096):
yield chunk
@@ -48,6 +52,8 @@ class GoogleCloudStorage(BaseStorage):
def download(self, filename, target_filepath):
bucket = self.client.get_bucket(self.bucket_name)
blob = bucket.get_blob(filename)
if blob is None:
raise FileNotFoundError("File not found")
blob.download_to_filename(target_filepath)

def exists(self, filename):

@@ -45,7 +45,7 @@ class HuaweiObsStorage(BaseStorage):

def _get_meta(self, filename):
res = self.client.getObjectMetadata(bucketName=self.bucket_name, objectKey=filename)
if res.status < 300:
if res and res.status and res.status < 300:
return res
else:
return None
@@ -29,7 +29,7 @@ class OracleOCIStorage(BaseStorage):
try:
data: bytes = self.client.get_object(Bucket=self.bucket_name, Key=filename)["Body"].read()
except ClientError as ex:
if ex.response["Error"]["Code"] == "NoSuchKey":
if ex.response.get("Error", {}).get("Code") == "NoSuchKey":
raise FileNotFoundError("File not found")
else:
raise
@@ -40,7 +40,7 @@ class OracleOCIStorage(BaseStorage):
response = self.client.get_object(Bucket=self.bucket_name, Key=filename)
yield from response["Body"].iter_chunks()
except ClientError as ex:
if ex.response["Error"]["Code"] == "NoSuchKey":
if ex.response.get("Error", {}).get("Code") == "NoSuchKey":
raise FileNotFoundError("File not found")
else:
raise

@@ -46,13 +46,13 @@ class SupabaseStorage(BaseStorage):
Path(target_filepath).write_bytes(result)

def exists(self, filename):
result = self.client.storage.from_(self.bucket_name).list(filename)
if result.count() > 0:
result = self.client.storage.from_(self.bucket_name).list(path=filename)
if len(result) > 0:
return True
return False

def delete(self, filename):
self.client.storage.from_(self.bucket_name).remove(filename)
self.client.storage.from_(self.bucket_name).remove([filename])

def bucket_exists(self):
buckets = self.client.storage.list_buckets()
@@ -11,6 +11,14 @@ class VolcengineTosStorage(BaseStorage):

def __init__(self):
super().__init__()
if not dify_config.VOLCENGINE_TOS_ACCESS_KEY:
raise ValueError("VOLCENGINE_TOS_ACCESS_KEY is not set")
if not dify_config.VOLCENGINE_TOS_SECRET_KEY:
raise ValueError("VOLCENGINE_TOS_SECRET_KEY is not set")
if not dify_config.VOLCENGINE_TOS_ENDPOINT:
raise ValueError("VOLCENGINE_TOS_ENDPOINT is not set")
if not dify_config.VOLCENGINE_TOS_REGION:
raise ValueError("VOLCENGINE_TOS_REGION is not set")
self.bucket_name = dify_config.VOLCENGINE_TOS_BUCKET_NAME
self.client = tos.TosClientV2(
ak=dify_config.VOLCENGINE_TOS_ACCESS_KEY,
@@ -20,27 +28,39 @@ class VolcengineTosStorage(BaseStorage):
)

def save(self, filename, data):
if not self.bucket_name:
raise ValueError("VOLCENGINE_TOS_BUCKET_NAME is not set")
self.client.put_object(bucket=self.bucket_name, key=filename, content=data)

def load_once(self, filename: str) -> bytes:
if not self.bucket_name:
raise FileNotFoundError("VOLCENGINE_TOS_BUCKET_NAME is not set")
data = self.client.get_object(bucket=self.bucket_name, key=filename).read()
if not isinstance(data, bytes):
raise TypeError(f"Expected bytes, got {type(data).__name__}")
return data

def load_stream(self, filename: str) -> Generator:
if not self.bucket_name:
raise FileNotFoundError("VOLCENGINE_TOS_BUCKET_NAME is not set")
response = self.client.get_object(bucket=self.bucket_name, key=filename)
while chunk := response.read(4096):
yield chunk

def download(self, filename, target_filepath):
if not self.bucket_name:
raise ValueError("VOLCENGINE_TOS_BUCKET_NAME is not set")
self.client.get_object_to_file(bucket=self.bucket_name, key=filename, file_path=target_filepath)

def exists(self, filename):
if not self.bucket_name:
return False
res = self.client.head_object(bucket=self.bucket_name, key=filename)
if res.status_code != 200:
return False
return True

def delete(self, filename):
if not self.bucket_name:
return
self.client.delete_object(bucket=self.bucket_name, key=filename)
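Validating each required setting in `__init__` turns a missing credential into an immediate, named `ValueError` at startup instead of an opaque SDK failure on the first request. A generic sketch of the same fail-fast pattern (the config mapping here is illustrative, not Dify's config object):

```python
class ObjectStorageClient:
    """Sketch: fail fast on missing configuration at construction time."""

    REQUIRED = ("ACCESS_KEY", "SECRET_KEY", "ENDPOINT", "REGION")

    def __init__(self, config: dict[str, str | None]):
        for key in self.REQUIRED:
            if not config.get(key):
                # The error names the exact missing setting, which is far
                # easier to act on than a later connection failure.
                raise ValueError(f"{key} is not set")
        self.bucket_name = config.get("BUCKET_NAME")  # checked per-operation instead

ObjectStorageClient({"ACCESS_KEY": "ak", "SECRET_KEY": "sk", "ENDPOINT": "e", "REGION": "r"})
```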
@@ -94,7 +94,7 @@ def register_external_error_handlers(api: Api):
got_request_exception.send(current_app, exception=e)

status_code = 500
data = getattr(e, "data", {"message": http_status_message(status_code)})
data: dict[str, Any] = getattr(e, "data", {"message": http_status_message(status_code)})

# 🔒 Normalize non-mapping data (e.g., if someone set e.data = Response)
if not isinstance(data, dict):

@@ -27,7 +27,7 @@ import gmpy2  # type: ignore
from Crypto import Random
from Crypto.Signature.pss import MGF1
from Crypto.Util.number import bytes_to_long, ceil_div, long_to_bytes
from Crypto.Util.py3compat import _copy_bytes, bord
from Crypto.Util.py3compat import bord
from Crypto.Util.strxor import strxor


@@ -72,7 +72,7 @@ class PKCS1OAepCipher:
else:
self._mgf = lambda x, y: MGF1(x, y, self._hashObj)

self._label = _copy_bytes(None, None, label)
self._label = bytes(label)
self._randfunc = randfunc

def can_encrypt(self):
@@ -120,7 +120,7 @@ class PKCS1OAepCipher:
# Step 2b
ps = b"\x00" * ps_len
# Step 2c
db = lHash + ps + b"\x01" + _copy_bytes(None, None, message)
db = lHash + ps + b"\x01" + bytes(message)
# Step 2d
ros = self._randfunc(hLen)
# Step 2e
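Replacing pycryptodome's private `_copy_bytes(None, None, x)` helper with the built-in `bytes(x)` is behavior-preserving here: both produce an immutable copy of a bytes-like label or message, and `bytes()` drops the dependency on a private module. A quick check of the copy semantics:

```python
label = bytearray(b"secret-label")
copied = bytes(label)             # immutable snapshot, like _copy_bytes(None, None, label)
label[0] = 0                      # mutating the source...
assert copied == b"secret-label"  # ...does not affect the copy
```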
@@ -14,7 +14,7 @@ class SendGridClient:

def send(self, mail: dict):
logger.debug("Sending email with SendGrid")

_to = ""
try:
_to = mail["to"]

@@ -28,7 +28,7 @@ class SendGridClient:
content = Content("text/html", mail["html"])
sg_mail = Mail(from_email, to_email, subject, content)
mail_json = sg_mail.get()
response = sg.client.mail.send.post(request_body=mail_json)  # ty: ignore [call-non-callable]
response = sg.client.mail.send.post(request_body=mail_json)  # type: ignore
logger.debug(response.status_code)
logger.debug(response.body)
logger.debug(response.headers)
api/libs/validators.py (new file, +5)
@@ -0,0 +1,5 @@
def validate_description_length(description: str | None) -> str | None:
"""Validate description length."""
if description and len(description) > 400:
raise ValueError("Description cannot exceed 400 characters.")
return description
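Centralizing the check in `libs/validators.py` gives the App and Dataset controllers one shared rule. Because the function returns its input or raises `ValueError`, it can be plugged straight into a reqparse argument as its `type` callable; a sketch of that wiring (presumed usage, not the exact controller code):

```python
from flask_restful import reqparse

from libs.validators import validate_description_length

parser = reqparse.RequestParser()
# reqparse invokes the type callable on the raw value and surfaces its
# ValueError to the client as a 400 response.
parser.add_argument("description", type=validate_description_length, location="json")
```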
@@ -1,6 +1,6 @@
[project]
name = "dify-api"
version = "1.9.0"
version = "1.9.1"
requires-python = ">=3.11,<3.13"

dependencies = [

@@ -1,17 +1,10 @@
{
"include": ["."],
"exclude": [
".venv",
"tests/",
".venv",
"migrations/",
"core/rag",
"extensions",
"libs",
"controllers/console/datasets",
"core/ops",
"core/tools",
"core/model_runtime",
"core/workflow/nodes",
"core/app/app_config/easy_ui_based_app/dataset"
],
"typeCheckingMode": "strict",
@@ -40,4 +33,4 @@
"reportAttributeAccessIssue": "hint",
"pythonVersion": "3.11",
"pythonPlatform": "All"
}
}
@@ -52,7 +52,8 @@ def check_upgradable_plugin_task():
strategy.include_plugins,
)

if batch_interval_time > 0.0001:  # if lower than 1ms, skip
# Only sleep if batch_interval_time > 0.0001 AND current batch is not the last one
if batch_interval_time > 0.0001 and i + MAX_CONCURRENT_CHECK_TASKS < total_strategies:
time.sleep(batch_interval_time)

end_at = time.perf_counter()

@@ -93,7 +93,7 @@ logger = logging.getLogger(__name__)
class DatasetService:
@staticmethod
def get_datasets(page, per_page, tenant_id=None, user=None, search=None, tag_ids=None, include_all=False):
query = select(Dataset).where(Dataset.tenant_id == tenant_id).order_by(Dataset.created_at.desc())
query = select(Dataset).where(Dataset.tenant_id == tenant_id).order_by(Dataset.created_at.desc(), Dataset.id)

if user:
# get permitted dataset ids
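Adding `Dataset.id` as a secondary sort key makes pagination deterministic: rows created in the same instant share a `created_at`, and without a unique tie-breaker the database may return the tied rows in a different order on each page query, so items can repeat or vanish across pages. The same effect in miniature:

```python
# Rows with identical timestamps need a unique tie-breaker to paginate stably.
rows = [("b", "2025-01-01"), ("a", "2025-01-01"), ("c", "2025-01-02")]

# Sorting on (timestamp, id) fixes the order of the tied pair; whichever
# direction is chosen, the order is now reproducible between queries,
# which is exactly what ORDER BY created_at DESC, id guarantees.
ordered = sorted(rows, key=lambda r: (r[1], r[0]), reverse=True)
assert ordered == [("c", "2025-01-02"), ("b", "2025-01-01"), ("a", "2025-01-01")]
```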
@@ -79,7 +79,6 @@ class WorkflowConverter:
new_app.updated_by = account.id
db.session.add(new_app)
db.session.flush()
db.session.commit()

workflow.app_id = new_app.id
db.session.commit()

@@ -11,8 +11,8 @@ from controllers.console.app import completion as completion_api
from controllers.console.app import message as message_api
from controllers.console.app import wraps
from libs.datetime_utils import naive_utc_now
from models import Account, App, Tenant
from models.account import TenantAccountRole
from models import App, Tenant
from models.account import Account, TenantAccountJoin, TenantAccountRole
from models.model import AppMode
from services.app_generate_service import AppGenerateService

@@ -31,9 +31,8 @@ class TestChatMessageApiPermissions:
return app

@pytest.fixture
def mock_account(self):
def mock_account(self, monkeypatch: pytest.MonkeyPatch):
"""Create a mock Account for testing."""

account = Account()
account.id = str(uuid.uuid4())
account.name = "Test User"
@@ -42,12 +41,24 @@ class TestChatMessageApiPermissions:
account.created_at = naive_utc_now()
account.updated_at = naive_utc_now()

# Create mock tenant
tenant = Tenant()
tenant.id = str(uuid.uuid4())
tenant.name = "Test Tenant"

account._current_tenant = tenant
mock_session_instance = mock.Mock()

mock_tenant_join = TenantAccountJoin(role=TenantAccountRole.OWNER)
monkeypatch.setattr(mock_session_instance, "scalar", mock.Mock(return_value=mock_tenant_join))

mock_scalars_result = mock.Mock()
mock_scalars_result.one.return_value = tenant
monkeypatch.setattr(mock_session_instance, "scalars", mock.Mock(return_value=mock_scalars_result))

mock_session_context = mock.Mock()
mock_session_context.__enter__.return_value = mock_session_instance
monkeypatch.setattr("models.account.Session", lambda _, expire_on_commit: mock_session_context)

account.current_tenant = tenant
return account

@pytest.mark.parametrize(
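The reworked fixture stops assigning `account._current_tenant` directly and instead goes through the public `current_tenant` setter, which internally opens a SQLAlchemy `Session`; the monkeypatched context manager above feeds that lookup a `TenantAccountJoin` and the tenant itself. A trimmed, self-contained sketch of faking a context-managed session this way (return values are placeholders):

```python
from unittest import mock

# Fake consumers of `with Session(engine, expire_on_commit=False) as session:`.
mock_session = mock.MagicMock()
mock_session.scalar.return_value = "tenant-join-row"           # session.scalar(...)
mock_session.scalars.return_value.one.return_value = "tenant"  # session.scalars(...).one()

session_cm = mock.MagicMock()  # MagicMock implements __enter__/__exit__
session_cm.__enter__.return_value = mock_session

def fake_session_factory(_engine, expire_on_commit):  # patched in for models.account.Session
    return session_cm

with fake_session_factory(None, expire_on_commit=False) as s:
    assert s.scalar("q") == "tenant-join-row"
    assert s.scalars("q").one() == "tenant"
```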
@@ -18,124 +18,87 @@ class TestAppDescriptionValidationUnit:
"""Unit tests for description validation function"""

def test_validate_description_length_function(self):
"""Test the _validate_description_length function directly"""
from controllers.console.app.app import _validate_description_length
"""Test the validate_description_length function directly"""
from libs.validators import validate_description_length

# Test valid descriptions
assert _validate_description_length("") == ""
assert _validate_description_length("x" * 400) == "x" * 400
assert _validate_description_length(None) is None
assert validate_description_length("") == ""
assert validate_description_length("x" * 400) == "x" * 400
assert validate_description_length(None) is None

# Test invalid descriptions
with pytest.raises(ValueError) as exc_info:
_validate_description_length("x" * 401)
validate_description_length("x" * 401)
assert "Description cannot exceed 400 characters." in str(exc_info.value)

with pytest.raises(ValueError) as exc_info:
_validate_description_length("x" * 500)
validate_description_length("x" * 500)
assert "Description cannot exceed 400 characters." in str(exc_info.value)

with pytest.raises(ValueError) as exc_info:
_validate_description_length("x" * 1000)
validate_description_length("x" * 1000)
assert "Description cannot exceed 400 characters." in str(exc_info.value)

def test_validation_consistency_with_dataset(self):
"""Test that App and Dataset validation functions are consistent"""
from controllers.console.app.app import _validate_description_length as app_validate
from controllers.console.datasets.datasets import _validate_description_length as dataset_validate
from controllers.service_api.dataset.dataset import _validate_description_length as service_dataset_validate

# Test same valid inputs
valid_desc = "x" * 400
assert app_validate(valid_desc) == dataset_validate(valid_desc) == service_dataset_validate(valid_desc)
assert app_validate("") == dataset_validate("") == service_dataset_validate("")
assert app_validate(None) == dataset_validate(None) == service_dataset_validate(None)

# Test same invalid inputs produce same error
invalid_desc = "x" * 401

app_error = None
dataset_error = None
service_dataset_error = None

try:
app_validate(invalid_desc)
except ValueError as e:
app_error = str(e)

try:
dataset_validate(invalid_desc)
except ValueError as e:
dataset_error = str(e)

try:
service_dataset_validate(invalid_desc)
except ValueError as e:
service_dataset_error = str(e)

assert app_error == dataset_error == service_dataset_error
assert app_error == "Description cannot exceed 400 characters."

def test_boundary_values(self):
"""Test boundary values for description validation"""
from controllers.console.app.app import _validate_description_length
from libs.validators import validate_description_length

# Test exact boundary
exactly_400 = "x" * 400
assert _validate_description_length(exactly_400) == exactly_400
assert validate_description_length(exactly_400) == exactly_400

# Test just over boundary
just_over_400 = "x" * 401
with pytest.raises(ValueError):
_validate_description_length(just_over_400)
validate_description_length(just_over_400)

# Test just under boundary
just_under_400 = "x" * 399
assert _validate_description_length(just_under_400) == just_under_400
assert validate_description_length(just_under_400) == just_under_400

def test_edge_cases(self):
"""Test edge cases for description validation"""
from controllers.console.app.app import _validate_description_length
from libs.validators import validate_description_length

# Test None input
assert _validate_description_length(None) is None
assert validate_description_length(None) is None

# Test empty string
assert _validate_description_length("") == ""
assert validate_description_length("") == ""

# Test single character
assert _validate_description_length("a") == "a"
assert validate_description_length("a") == "a"

# Test unicode characters
unicode_desc = "测试" * 200  # 400 characters in Chinese
assert _validate_description_length(unicode_desc) == unicode_desc
assert validate_description_length(unicode_desc) == unicode_desc

# Test unicode over limit
unicode_over = "测试" * 201  # 402 characters
with pytest.raises(ValueError):
_validate_description_length(unicode_over)
validate_description_length(unicode_over)

def test_whitespace_handling(self):
"""Test how validation handles whitespace"""
from controllers.console.app.app import _validate_description_length
from libs.validators import validate_description_length

# Test description with spaces
spaces_400 = " " * 400
assert _validate_description_length(spaces_400) == spaces_400
assert validate_description_length(spaces_400) == spaces_400

# Test description with spaces over limit
spaces_401 = " " * 401
with pytest.raises(ValueError):
_validate_description_length(spaces_401)
validate_description_length(spaces_401)

# Test mixed content
mixed_400 = "a" * 200 + " " * 200
assert _validate_description_length(mixed_400) == mixed_400
assert validate_description_length(mixed_400) == mixed_400

# Test mixed over limit
mixed_401 = "a" * 200 + " " * 201
with pytest.raises(ValueError):
_validate_description_length(mixed_401)
validate_description_length(mixed_401)


if __name__ == "__main__":
@@ -9,8 +9,8 @@ from flask.testing import FlaskClient
from controllers.console.app import model_config as model_config_api
from controllers.console.app import wraps
from libs.datetime_utils import naive_utc_now
from models import Account, App, Tenant
from models.account import TenantAccountRole
from models import App, Tenant
from models.account import Account, TenantAccountJoin, TenantAccountRole
from models.model import AppMode
from services.app_model_config_service import AppModelConfigService

@@ -30,9 +30,8 @@ class TestModelConfigResourcePermissions:
return app

@pytest.fixture
def mock_account(self):
def mock_account(self, monkeypatch: pytest.MonkeyPatch):
"""Create a mock Account for testing."""

account = Account()
account.id = str(uuid.uuid4())
account.name = "Test User"
@@ -41,12 +40,24 @@ class TestModelConfigResourcePermissions:
account.created_at = naive_utc_now()
account.updated_at = naive_utc_now()

# Create mock tenant
tenant = Tenant()
tenant.id = str(uuid.uuid4())
tenant.name = "Test Tenant"

account._current_tenant = tenant
mock_session_instance = mock.Mock()

mock_tenant_join = TenantAccountJoin(role=TenantAccountRole.OWNER)
monkeypatch.setattr(mock_session_instance, "scalar", mock.Mock(return_value=mock_tenant_join))

mock_scalars_result = mock.Mock()
mock_scalars_result.one.return_value = tenant
monkeypatch.setattr(mock_session_instance, "scalars", mock.Mock(return_value=mock_scalars_result))

mock_session_context = mock.Mock()
mock_session_context.__enter__.return_value = mock_session_instance
monkeypatch.setattr("models.account.Session", lambda _, expire_on_commit: mock_session_context)

account.current_tenant = tenant
return account

@pytest.mark.parametrize(
@@ -18,6 +18,7 @@ from flask.testing import FlaskClient
from sqlalchemy import Engine, text
from sqlalchemy.orm import Session
from testcontainers.core.container import DockerContainer
from testcontainers.core.network import Network
from testcontainers.core.waiting_utils import wait_for_logs
from testcontainers.postgres import PostgresContainer
from testcontainers.redis import RedisContainer
@@ -41,6 +42,7 @@ class DifyTestContainers:

def __init__(self):
"""Initialize container management with default configurations."""
self.network: Network | None = None
self.postgres: PostgresContainer | None = None
self.redis: RedisContainer | None = None
self.dify_sandbox: DockerContainer | None = None
@@ -62,12 +64,18 @@ class DifyTestContainers:

logger.info("Starting test containers for Dify integration tests...")

# Create Docker network for container communication
logger.info("Creating Docker network for container communication...")
self.network = Network()
self.network.create()
logger.info("Docker network created successfully with name: %s", self.network.name)

# Start PostgreSQL container for main application database
# PostgreSQL is used for storing user data, workflows, and application state
logger.info("Initializing PostgreSQL container...")
self.postgres = PostgresContainer(
image="postgres:14-alpine",
)
).with_network(self.network)
self.postgres.start()
db_host = self.postgres.get_container_host_ip()
db_port = self.postgres.get_exposed_port(5432)
@@ -137,7 +145,7 @@ class DifyTestContainers:
# Start Redis container for caching and session management
# Redis is used for storing session data, cache entries, and temporary data
logger.info("Initializing Redis container...")
self.redis = RedisContainer(image="redis:6-alpine", port=6379)
self.redis = RedisContainer(image="redis:6-alpine", port=6379).with_network(self.network)
self.redis.start()
redis_host = self.redis.get_container_host_ip()
redis_port = self.redis.get_exposed_port(6379)
@@ -153,7 +161,7 @@ class DifyTestContainers:
# Start Dify Sandbox container for code execution environment
# Dify Sandbox provides a secure environment for executing user code
logger.info("Initializing Dify Sandbox container...")
self.dify_sandbox = DockerContainer(image="langgenius/dify-sandbox:latest")
self.dify_sandbox = DockerContainer(image="langgenius/dify-sandbox:latest").with_network(self.network)
self.dify_sandbox.with_exposed_ports(8194)
self.dify_sandbox.env = {
"API_KEY": "test_api_key",
@@ -173,22 +181,28 @@ class DifyTestContainers:
# Start Dify Plugin Daemon container for plugin management
# Dify Plugin Daemon provides plugin lifecycle management and execution
logger.info("Initializing Dify Plugin Daemon container...")
self.dify_plugin_daemon = DockerContainer(image="langgenius/dify-plugin-daemon:0.3.0-local")
self.dify_plugin_daemon = DockerContainer(image="langgenius/dify-plugin-daemon:0.3.0-local").with_network(
self.network
)
self.dify_plugin_daemon.with_exposed_ports(5002)
# Get container internal network addresses
postgres_container_name = self.postgres.get_wrapped_container().name
redis_container_name = self.redis.get_wrapped_container().name

self.dify_plugin_daemon.env = {
"DB_HOST": db_host,
"DB_PORT": str(db_port),
"DB_HOST": postgres_container_name,  # Use container name for internal network communication
"DB_PORT": "5432",  # Use internal port
"DB_USERNAME": self.postgres.username,
"DB_PASSWORD": self.postgres.password,
"DB_DATABASE": "dify_plugin",
"REDIS_HOST": redis_host,
"REDIS_PORT": str(redis_port),
"REDIS_HOST": redis_container_name,  # Use container name for internal network communication
"REDIS_PORT": "6379",  # Use internal port
"REDIS_PASSWORD": "",
"SERVER_PORT": "5002",
"SERVER_KEY": "test_plugin_daemon_key",
"MAX_PLUGIN_PACKAGE_SIZE": "52428800",
"PPROF_ENABLED": "false",
"DIFY_INNER_API_URL": f"http://{db_host}:5001",
"DIFY_INNER_API_URL": f"http://{postgres_container_name}:5001",
"DIFY_INNER_API_KEY": "test_inner_api_key",
"PLUGIN_REMOTE_INSTALLING_HOST": "0.0.0.0",
"PLUGIN_REMOTE_INSTALLING_PORT": "5003",
@@ -253,6 +267,15 @@ class DifyTestContainers:
# Log error but don't fail the test cleanup
logger.warning("Failed to stop container %s: %s", container, e)

# Stop and remove the network
if self.network:
try:
logger.info("Removing Docker network...")
self.network.remove()
logger.info("Successfully removed Docker network")
except Exception as e:
logger.warning("Failed to remove Docker network: %s", e)

self._containers_started = False
logger.info("All test containers stopped and cleaned up successfully")
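Putting all containers on one user-defined network is what makes the plugin daemon's container-name addressing work: the host-mapped `db_host`/`db_port` pair is only reachable from the test process, while sibling containers must dial each other by container name on the internal port. A reduced sketch of the two addressing forms (lifecycle trimmed to the essentials):

```python
from testcontainers.core.network import Network
from testcontainers.postgres import PostgresContainer

network = Network()
network.create()
postgres = PostgresContainer(image="postgres:14-alpine").with_network(network)
postgres.start()
try:
    # From the test process: host IP plus the randomly mapped port.
    host_url = f"{postgres.get_container_host_ip()}:{postgres.get_exposed_port(5432)}"

    # From a sibling container on the same network: container name plus
    # the service's internal port, exactly as the plugin daemon env does.
    peer_url = f"{postgres.get_wrapped_container().name}:5432"
    print(host_url, peer_url)
finally:
    postgres.stop()
    network.remove()
```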
@@ -784,133 +784,6 @@ class TestCleanDatasetTask:
print(f"Total cleanup time: {cleanup_duration:.3f} seconds")
print(f"Average time per document: {cleanup_duration / len(documents):.3f} seconds")

def test_clean_dataset_task_concurrent_cleanup_scenarios(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test dataset cleanup with concurrent cleanup scenarios and race conditions.

This test verifies that the task can properly:
1. Handle multiple cleanup operations on the same dataset
2. Prevent data corruption during concurrent access
3. Maintain data consistency across multiple cleanup attempts
4. Handle race conditions gracefully
5. Ensure idempotent cleanup operations
"""
# Create test data
account, tenant = self._create_test_account_and_tenant(db_session_with_containers)
dataset = self._create_test_dataset(db_session_with_containers, account, tenant)
document = self._create_test_document(db_session_with_containers, account, tenant, dataset)
segment = self._create_test_segment(db_session_with_containers, account, tenant, dataset, document)
upload_file = self._create_test_upload_file(db_session_with_containers, account, tenant)

# Update document with file reference
import json

document.data_source_info = json.dumps({"upload_file_id": upload_file.id})
from extensions.ext_database import db

db.session.commit()

# Save IDs for verification
dataset_id = dataset.id
tenant_id = tenant.id
upload_file_id = upload_file.id

# Mock storage to simulate slow operations
mock_storage = mock_external_service_dependencies["storage"]
original_delete = mock_storage.delete

def slow_delete(key):
import time

time.sleep(0.1)  # Simulate slow storage operation
return original_delete(key)

mock_storage.delete.side_effect = slow_delete

# Execute multiple cleanup operations concurrently
import threading

cleanup_results = []
cleanup_errors = []

def run_cleanup():
try:
clean_dataset_task(
dataset_id=dataset_id,
tenant_id=tenant_id,
indexing_technique="high_quality",
index_struct='{"type": "paragraph"}',
collection_binding_id=str(uuid.uuid4()),
doc_form="paragraph_index",
)
cleanup_results.append("success")
except Exception as e:
cleanup_errors.append(str(e))

# Start multiple cleanup threads
threads = []
for i in range(3):
thread = threading.Thread(target=run_cleanup)
threads.append(thread)
thread.start()

# Wait for all threads to complete
for thread in threads:
thread.join()

# Verify results
# Check that all documents were deleted (only once)
remaining_documents = db.session.query(Document).filter_by(dataset_id=dataset_id).all()
assert len(remaining_documents) == 0

# Check that all segments were deleted (only once)
remaining_segments = db.session.query(DocumentSegment).filter_by(dataset_id=dataset_id).all()
assert len(remaining_segments) == 0

# Check that upload file was deleted (only once)
# Note: In concurrent scenarios, the first thread deletes documents and segments,
# subsequent threads may not find the related data to clean up upload files
# This demonstrates the idempotent nature of the cleanup process
remaining_files = db.session.query(UploadFile).filter_by(id=upload_file_id).all()
# The upload file should be deleted by the first successful cleanup operation
# However, in concurrent scenarios, this may not always happen due to race conditions
# This test demonstrates the idempotent nature of the cleanup process
if len(remaining_files) > 0:
print(f"Warning: Upload file {upload_file_id} was not deleted in concurrent scenario")
print("This is expected behavior demonstrating the idempotent nature of cleanup")
# We don't assert here as the behavior depends on timing and race conditions

# Verify that storage.delete was called (may be called multiple times in concurrent scenarios)
# In concurrent scenarios, storage operations may be called multiple times due to race conditions
assert mock_storage.delete.call_count > 0

# Verify that index processor was called (may be called multiple times in concurrent scenarios)
mock_index_processor = mock_external_service_dependencies["index_processor"]
assert mock_index_processor.clean.call_count > 0

# Check cleanup results
assert len(cleanup_results) == 3, "All cleanup operations should complete"
assert len(cleanup_errors) == 0, "No cleanup errors should occur"

# Verify idempotency by running cleanup again on the same dataset
# This should not perform any additional operations since data is already cleaned
clean_dataset_task(
dataset_id=dataset_id,
tenant_id=tenant_id,
indexing_technique="high_quality",
index_struct='{"type": "paragraph"}',
collection_binding_id=str(uuid.uuid4()),
doc_form="paragraph_index",
)

# Verify that no additional storage operations were performed
# Note: In concurrent scenarios, the exact count may vary due to race conditions
print(f"Final storage delete calls: {mock_storage.delete.call_count}")
print(f"Final index processor calls: {mock_index_processor.clean.call_count}")
print("Note: Multiple calls in concurrent scenarios are expected due to race conditions")

def test_clean_dataset_task_storage_exception_handling(
self, db_session_with_containers, mock_external_service_dependencies
):
@@ -0,0 +1,450 @@
from unittest.mock import MagicMock, patch

import pytest
from faker import Faker

from core.rag.index_processor.constant.index_type import IndexType
from extensions.ext_database import db
from extensions.ext_redis import redis_client
from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole
from models.dataset import Dataset, Document, DocumentSegment
from tasks.enable_segments_to_index_task import enable_segments_to_index_task


class TestEnableSegmentsToIndexTask:
"""Integration tests for enable_segments_to_index_task using testcontainers."""

@pytest.fixture
def mock_external_service_dependencies(self):
"""Mock setup for external service dependencies."""
with (
patch("tasks.enable_segments_to_index_task.IndexProcessorFactory") as mock_index_processor_factory,
):
# Setup mock index processor
mock_processor = MagicMock()
mock_index_processor_factory.return_value.init_index_processor.return_value = mock_processor

yield {
"index_processor_factory": mock_index_processor_factory,
"index_processor": mock_processor,
}

def _create_test_dataset_and_document(self, db_session_with_containers, mock_external_service_dependencies):
"""
Helper method to create a test dataset and document for testing.

Args:
db_session_with_containers: Database session from testcontainers infrastructure
mock_external_service_dependencies: Mock dependencies

Returns:
tuple: (dataset, document) - Created dataset and document instances
"""
fake = Faker()

# Create account and tenant
account = Account(
email=fake.email(),
name=fake.name(),
interface_language="en-US",
status="active",
)
db.session.add(account)
db.session.commit()

tenant = Tenant(
name=fake.company(),
status="normal",
)
db.session.add(tenant)
db.session.commit()

# Create tenant-account join
join = TenantAccountJoin(
tenant_id=tenant.id,
account_id=account.id,
role=TenantAccountRole.OWNER.value,
current=True,
)
db.session.add(join)
db.session.commit()

# Create dataset
dataset = Dataset(
id=fake.uuid4(),
tenant_id=tenant.id,
name=fake.company(),
description=fake.text(max_nb_chars=100),
data_source_type="upload_file",
indexing_technique="high_quality",
created_by=account.id,
)
db.session.add(dataset)
db.session.commit()

# Create document
document = Document(
id=fake.uuid4(),
tenant_id=tenant.id,
dataset_id=dataset.id,
position=1,
data_source_type="upload_file",
batch="test_batch",
name=fake.file_name(),
created_from="upload_file",
created_by=account.id,
indexing_status="completed",
enabled=True,
doc_form=IndexType.PARAGRAPH_INDEX,
)
db.session.add(document)
db.session.commit()

# Refresh dataset to ensure doc_form property works correctly
db.session.refresh(dataset)

return dataset, document

def _create_test_segments(
self, db_session_with_containers, document, dataset, count=3, enabled=False, status="completed"
):
"""
Helper method to create test document segments.

Args:
db_session_with_containers: Database session from testcontainers infrastructure
document: Document instance
dataset: Dataset instance
count: Number of segments to create
enabled: Whether segments should be enabled
status: Status of the segments

Returns:
list: List of created DocumentSegment instances
"""
fake = Faker()
segments = []

for i in range(count):
text = fake.text(max_nb_chars=200)
segment = DocumentSegment(
id=fake.uuid4(),
tenant_id=document.tenant_id,
dataset_id=dataset.id,
document_id=document.id,
position=i,
content=text,
word_count=len(text.split()),
tokens=len(text.split()) * 2,
index_node_id=f"node_{i}",
index_node_hash=f"hash_{i}",
enabled=enabled,
status=status,
created_by=document.created_by,
)
db.session.add(segment)
segments.append(segment)

db.session.commit()
return segments

def test_enable_segments_to_index_with_different_index_type(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test segments indexing with different index types.

This test verifies:
- Proper handling of different index types
- Index processor factory integration
- Document processing with various configurations
- Redis cache key deletion
"""
# Arrange: Create test data with different index type
dataset, document = self._create_test_dataset_and_document(
db_session_with_containers, mock_external_service_dependencies
)

# Update document to use different index type
document.doc_form = IndexType.QA_INDEX
db.session.commit()

# Refresh dataset to ensure doc_form property reflects the updated document
db.session.refresh(dataset)

# Create segments
segments = self._create_test_segments(db_session_with_containers, document, dataset)

# Set up Redis cache keys
segment_ids = [segment.id for segment in segments]
for segment in segments:
indexing_cache_key = f"segment_{segment.id}_indexing"
redis_client.set(indexing_cache_key, "processing", ex=300)

# Act: Execute the task
enable_segments_to_index_task(segment_ids, dataset.id, document.id)

# Assert: Verify different index type handling
mock_external_service_dependencies["index_processor_factory"].assert_called_once_with(IndexType.QA_INDEX)
mock_external_service_dependencies["index_processor"].load.assert_called_once()

# Verify the load method was called with correct parameters
call_args = mock_external_service_dependencies["index_processor"].load.call_args
assert call_args is not None
documents = call_args[0][1]  # Second argument should be documents list
assert len(documents) == 3

# Verify Redis cache keys were deleted
for segment in segments:
indexing_cache_key = f"segment_{segment.id}_indexing"
assert redis_client.exists(indexing_cache_key) == 0

def test_enable_segments_to_index_dataset_not_found(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test handling of non-existent dataset.

This test verifies:
- Proper error handling for missing datasets
- Early return without processing
- Database session cleanup
- No unnecessary index processor calls
"""
# Arrange: Use non-existent dataset ID
fake = Faker()
non_existent_dataset_id = fake.uuid4()
non_existent_document_id = fake.uuid4()
segment_ids = [fake.uuid4()]

# Act: Execute the task with non-existent dataset
enable_segments_to_index_task(segment_ids, non_existent_dataset_id, non_existent_document_id)

# Assert: Verify no processing occurred
mock_external_service_dependencies["index_processor_factory"].assert_not_called()
mock_external_service_dependencies["index_processor"].load.assert_not_called()

def test_enable_segments_to_index_document_not_found(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test handling of non-existent document.

This test verifies:
- Proper error handling for missing documents
- Early return without processing
- Database session cleanup
- No unnecessary index processor calls
"""
# Arrange: Create dataset but use non-existent document ID
dataset, _ = self._create_test_dataset_and_document(
db_session_with_containers, mock_external_service_dependencies
)
fake = Faker()
non_existent_document_id = fake.uuid4()
segment_ids = [fake.uuid4()]

# Act: Execute the task with non-existent document
enable_segments_to_index_task(segment_ids, dataset.id, non_existent_document_id)

# Assert: Verify no processing occurred
mock_external_service_dependencies["index_processor_factory"].assert_not_called()
mock_external_service_dependencies["index_processor"].load.assert_not_called()

def test_enable_segments_to_index_invalid_document_status(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test handling of document with invalid status.

This test verifies:
- Early return when document is disabled, archived, or not completed
- No index processing for documents not ready for indexing
- Proper database session cleanup
- No unnecessary external service calls
"""
# Arrange: Create test data with invalid document status
dataset, document = self._create_test_dataset_and_document(
db_session_with_containers, mock_external_service_dependencies
)

# Test different invalid statuses
invalid_statuses = [
("disabled", {"enabled": False}),
("archived", {"archived": True}),
("not_completed", {"indexing_status": "processing"}),
]

for _, status_attrs in invalid_statuses:
# Reset document status
document.enabled = True
document.archived = False
document.indexing_status = "completed"
db.session.commit()

# Set invalid status
for attr, value in status_attrs.items():
setattr(document, attr, value)
db.session.commit()

# Create segments
segments = self._create_test_segments(db_session_with_containers, document, dataset)
segment_ids = [segment.id for segment in segments]

# Act: Execute the task
enable_segments_to_index_task(segment_ids, dataset.id, document.id)

# Assert: Verify no processing occurred
mock_external_service_dependencies["index_processor_factory"].assert_not_called()
mock_external_service_dependencies["index_processor"].load.assert_not_called()

# Clean up segments for next iteration
for segment in segments:
db.session.delete(segment)
db.session.commit()

def test_enable_segments_to_index_segments_not_found(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test handling when no segments are found.

This test verifies:
- Proper handling when segments don't exist
- Early return without processing
- Database session cleanup
- Index processor is created but load is not called
"""
# Arrange: Create test data
dataset, document = self._create_test_dataset_and_document(
db_session_with_containers, mock_external_service_dependencies
)

# Use non-existent segment IDs
fake = Faker()
non_existent_segment_ids = [fake.uuid4() for _ in range(3)]

# Act: Execute the task with non-existent segments
enable_segments_to_index_task(non_existent_segment_ids, dataset.id, document.id)

# Assert: Verify index processor was created but load was not called
mock_external_service_dependencies["index_processor_factory"].assert_called_once_with(IndexType.PARAGRAPH_INDEX)
mock_external_service_dependencies["index_processor"].load.assert_not_called()

def test_enable_segments_to_index_with_parent_child_structure(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test segments indexing with parent-child structure.

This test verifies:
- Proper handling of PARENT_CHILD_INDEX type
- Child document creation from segments
- Correct document structure for parent-child indexing
- Index processor receives properly structured documents
- Redis cache key deletion
"""
# Arrange: Create test data with parent-child index type
dataset, document = self._create_test_dataset_and_document(
db_session_with_containers, mock_external_service_dependencies
)

# Update document to use parent-child index type
document.doc_form = IndexType.PARENT_CHILD_INDEX
db.session.commit()

# Refresh dataset to ensure doc_form property reflects the updated document
db.session.refresh(dataset)

# Create segments with mock child chunks
segments = self._create_test_segments(db_session_with_containers, document, dataset)

# Set up Redis cache keys
segment_ids = [segment.id for segment in segments]
for segment in segments:
indexing_cache_key = f"segment_{segment.id}_indexing"
redis_client.set(indexing_cache_key, "processing", ex=300)

# Mock the get_child_chunks method for each segment
with patch.object(DocumentSegment, "get_child_chunks") as mock_get_child_chunks:
# Setup mock to return child chunks for each segment
mock_child_chunks = []
for i in range(2):  # Each segment has 2 child chunks
mock_child = MagicMock()
mock_child.content = f"child_content_{i}"
mock_child.index_node_id = f"child_node_{i}"
mock_child.index_node_hash = f"child_hash_{i}"
mock_child_chunks.append(mock_child)

mock_get_child_chunks.return_value = mock_child_chunks

# Act: Execute the task
enable_segments_to_index_task(segment_ids, dataset.id, document.id)

# Assert: Verify parent-child index processing
mock_external_service_dependencies["index_processor_factory"].assert_called_once_with(
IndexType.PARENT_CHILD_INDEX
)
mock_external_service_dependencies["index_processor"].load.assert_called_once()

# Verify the load method was called with correct parameters
call_args = mock_external_service_dependencies["index_processor"].load.call_args
assert call_args is not None
documents = call_args[0][1]  # Second argument should be documents list
assert len(documents) == 3  # 3 segments

# Verify each document has children
for doc in documents:
assert hasattr(doc, "children")
assert len(doc.children) == 2  # Each document has 2 children

# Verify Redis cache keys were deleted
for segment in segments:
indexing_cache_key = f"segment_{segment.id}_indexing"
assert redis_client.exists(indexing_cache_key) == 0

def test_enable_segments_to_index_general_exception_handling(
self, db_session_with_containers, mock_external_service_dependencies
):
"""
Test general exception handling during indexing process.

This test verifies:
- Exceptions are properly caught and handled
- Segment status is set to error
- Segments are disabled
- Error information is recorded
- Redis cache is still cleared
- Database session is properly closed
"""
# Arrange: Create test data
dataset, document = self._create_test_dataset_and_document(
db_session_with_containers, mock_external_service_dependencies
)
segments = self._create_test_segments(db_session_with_containers, document, dataset)

# Set up Redis cache keys
segment_ids = [segment.id for segment in segments]
for segment in segments:
indexing_cache_key = f"segment_{segment.id}_indexing"
redis_client.set(indexing_cache_key, "processing", ex=300)

# Mock the index processor to raise an exception
mock_external_service_dependencies["index_processor"].load.side_effect = Exception("Index processing failed")

# Act: Execute the task
enable_segments_to_index_task(segment_ids, dataset.id, document.id)

# Assert: Verify error handling
for segment in segments:
db.session.refresh(segment)
assert segment.enabled is False
assert segment.status == "error"
assert segment.error is not None
assert "Index processing failed" in segment.error
assert segment.disabled_at is not None

# Verify Redis cache keys were still cleared despite error
for segment in segments:
indexing_cache_key = f"segment_{segment.id}_indexing"
assert redis_client.exists(indexing_cache_key) == 0
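Taken together, the tests above pin down a guard sequence for the task: a missing dataset or document returns before the processor factory is touched, an un-indexable document returns early, and missing segments create the processor but never call `load`. A sketch of the control flow the tests imply (this is an inference from the assertions, not the task's actual code):

```python
def enable_segments_sketch(segment_records, dataset, document, factory):
    # 1. Missing dataset or document: nothing is constructed at all.
    if dataset is None or document is None:
        return
    # 2. Document not ready for indexing: early return, no factory call.
    if not document.enabled or document.archived or document.indexing_status != "completed":
        return
    # 3. The processor is built from the document's form...
    processor = factory(document.doc_form).init_index_processor()
    segments = [s for s in segment_records if s is not None]  # stand-in for the DB query
    if not segments:
        return  # ...but load() is never reached without segments.
    processor.load(dataset, segments)
```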
||||
@@ -0,0 +1,242 @@
from unittest.mock import MagicMock, patch

import pytest
from faker import Faker

from extensions.ext_database import db
from libs.email_i18n import EmailType
from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole
from tasks.mail_account_deletion_task import send_account_deletion_verification_code, send_deletion_success_task


class TestMailAccountDeletionTask:
    """Integration tests for mail account deletion tasks using testcontainers."""

    @pytest.fixture
    def mock_external_service_dependencies(self):
        """Mock setup for external service dependencies."""
        with (
            patch("tasks.mail_account_deletion_task.mail") as mock_mail,
            patch("tasks.mail_account_deletion_task.get_email_i18n_service") as mock_get_email_service,
        ):
            # Setup mock mail service
            mock_mail.is_inited.return_value = True

            # Setup mock email service
            mock_email_service = MagicMock()
            mock_get_email_service.return_value = mock_email_service

            yield {
                "mail": mock_mail,
                "get_email_service": mock_get_email_service,
                "email_service": mock_email_service,
            }

    def _create_test_account(self, db_session_with_containers):
        """
        Helper method to create a test account for testing.

        Args:
            db_session_with_containers: Database session from testcontainers infrastructure

        Returns:
            Account: Created account instance
        """
        fake = Faker()

        # Create account
        account = Account(
            email=fake.email(),
            name=fake.name(),
            interface_language="en-US",
            status="active",
        )
        db.session.add(account)
        db.session.commit()

        # Create tenant
        tenant = Tenant(
            name=fake.company(),
            status="normal",
        )
        db.session.add(tenant)
        db.session.commit()

        # Create tenant-account join
        join = TenantAccountJoin(
            tenant_id=tenant.id,
            account_id=account.id,
            role=TenantAccountRole.OWNER.value,
            current=True,
        )
        db.session.add(join)
        db.session.commit()

        return account

    def test_send_deletion_success_task_success(self, db_session_with_containers, mock_external_service_dependencies):
        """
        Test successful account deletion success email sending.

        This test verifies:
        - Proper email service initialization check
        - Correct email service method calls
        - Template context is properly formatted
        - Email type is correctly specified
        """
        # Arrange: Create test data
        account = self._create_test_account(db_session_with_containers)
        test_email = account.email
        test_language = "en-US"

        # Act: Execute the task
        send_deletion_success_task(test_email, test_language)

        # Assert: Verify the expected outcomes
        # Verify mail service was checked
        mock_external_service_dependencies["mail"].is_inited.assert_called_once()

        # Verify email service was retrieved
        mock_external_service_dependencies["get_email_service"].assert_called_once()

        # Verify email was sent with correct parameters
        mock_external_service_dependencies["email_service"].send_email.assert_called_once_with(
            email_type=EmailType.ACCOUNT_DELETION_SUCCESS,
            language_code=test_language,
            to=test_email,
            template_context={
                "to": test_email,
                "email": test_email,
            },
        )

    def test_send_deletion_success_task_mail_not_initialized(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test account deletion success email when mail service is not initialized.

        This test verifies:
        - Early return when mail service is not initialized
        - No email service calls are made
        - No exceptions are raised
        """
        # Arrange: Setup mail service to return not initialized
        mock_external_service_dependencies["mail"].is_inited.return_value = False
        account = self._create_test_account(db_session_with_containers)
        test_email = account.email

        # Act: Execute the task
        send_deletion_success_task(test_email)

        # Assert: Verify no email service calls were made
        mock_external_service_dependencies["get_email_service"].assert_not_called()
        mock_external_service_dependencies["email_service"].send_email.assert_not_called()

    def test_send_deletion_success_task_email_service_exception(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test account deletion success email when email service raises exception.

        This test verifies:
        - Exception is properly caught and logged
        - Task completes without raising exception
        - Error logging is recorded
        """
        # Arrange: Setup email service to raise exception
        mock_external_service_dependencies["email_service"].send_email.side_effect = Exception("Email service failed")
        account = self._create_test_account(db_session_with_containers)
        test_email = account.email

        # Act: Execute the task (should not raise exception)
        send_deletion_success_task(test_email)

        # Assert: Verify email service was called but exception was handled
        mock_external_service_dependencies["email_service"].send_email.assert_called_once()

    def test_send_account_deletion_verification_code_success(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test successful account deletion verification code email sending.

        This test verifies:
        - Proper email service initialization check
        - Correct email service method calls
        - Template context includes verification code
        - Email type is correctly specified
        """
        # Arrange: Create test data
        account = self._create_test_account(db_session_with_containers)
        test_email = account.email
        test_code = "123456"
        test_language = "en-US"

        # Act: Execute the task
        send_account_deletion_verification_code(test_email, test_code, test_language)

        # Assert: Verify the expected outcomes
        # Verify mail service was checked
        mock_external_service_dependencies["mail"].is_inited.assert_called_once()

        # Verify email service was retrieved
        mock_external_service_dependencies["get_email_service"].assert_called_once()

        # Verify email was sent with correct parameters
        mock_external_service_dependencies["email_service"].send_email.assert_called_once_with(
            email_type=EmailType.ACCOUNT_DELETION_VERIFICATION,
            language_code=test_language,
            to=test_email,
            template_context={
                "to": test_email,
                "code": test_code,
            },
        )

    def test_send_account_deletion_verification_code_mail_not_initialized(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test account deletion verification code email when mail service is not initialized.

        This test verifies:
        - Early return when mail service is not initialized
        - No email service calls are made
        - No exceptions are raised
        """
        # Arrange: Setup mail service to return not initialized
        mock_external_service_dependencies["mail"].is_inited.return_value = False
        account = self._create_test_account(db_session_with_containers)
        test_email = account.email
        test_code = "123456"

        # Act: Execute the task
        send_account_deletion_verification_code(test_email, test_code)

        # Assert: Verify no email service calls were made
        mock_external_service_dependencies["get_email_service"].assert_not_called()
        mock_external_service_dependencies["email_service"].send_email.assert_not_called()

    def test_send_account_deletion_verification_code_email_service_exception(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test account deletion verification code email when email service raises exception.

        This test verifies:
        - Exception is properly caught and logged
        - Task completes without raising exception
        - Error logging is recorded
        """
        # Arrange: Setup email service to raise exception
        mock_external_service_dependencies["email_service"].send_email.side_effect = Exception("Email service failed")
        account = self._create_test_account(db_session_with_containers)
        test_email = account.email
        test_code = "123456"

        # Act: Execute the task (should not raise exception)
        send_account_deletion_verification_code(test_email, test_code)

        # Assert: Verify email service was called but exception was handled
        mock_external_service_dependencies["email_service"].send_email.assert_called_once()
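The behavior these tests pin down reduces to a small guard-and-try pattern. The sketch below is only an approximation of what `tasks/mail_account_deletion_task.py` could look like, inferred from the assertions above; the Celery decorator, queue name, import paths, default language, and log wording are assumptions, not the repository's actual code.

```python
# Minimal sketch inferred from the test assertions; details are assumptions.
import logging

from celery import shared_task  # assumption: Dify mail tasks are Celery shared tasks

from extensions.ext_mail import mail  # assumption: import path for the mail extension
from libs.email_i18n import EmailType, get_email_i18n_service

logger = logging.getLogger(__name__)


@shared_task(queue="mail")  # assumption: queue name
def send_deletion_success_task(to: str, language: str = "en-US") -> None:
    """Send the 'account deleted' notification; never raise on provider errors."""
    if not mail.is_inited():
        return  # early exit keeps the task a no-op when mail is unconfigured

    try:
        get_email_i18n_service().send_email(
            email_type=EmailType.ACCOUNT_DELETION_SUCCESS,
            language_code=language,
            to=to,
            template_context={"to": to, "email": to},
        )
    except Exception:
        # the tests assert the task swallows failures and only logs them
        logger.exception("Send account deletion success email to %s failed", to)
```

`send_account_deletion_verification_code(to, code, language)` would follow the same shape, with `EmailType.ACCOUNT_DELETION_VERIFICATION` and `template_context={"to": to, "code": code}`.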
@@ -0,0 +1,598 @@
"""
TestContainers-based integration tests for send_email_code_login_mail_task.

This module provides comprehensive integration tests for the email code login mail task
using TestContainers infrastructure. The tests ensure that the task properly sends
email verification codes for login with internationalization support and handles
various error scenarios in a real database environment.

All tests use the testcontainers infrastructure to ensure proper database isolation
and realistic testing scenarios with actual PostgreSQL and Redis instances.
"""

from unittest.mock import MagicMock, patch

import pytest
from faker import Faker

from libs.email_i18n import EmailType
from models.account import Account, Tenant, TenantAccountJoin, TenantAccountRole
from tasks.mail_email_code_login import send_email_code_login_mail_task


class TestSendEmailCodeLoginMailTask:
    """
    Comprehensive integration tests for send_email_code_login_mail_task using testcontainers.

    This test class covers all major functionality of the email code login mail task:
    - Successful email sending with different languages
    - Email service integration and template rendering
    - Error handling for various failure scenarios
    - Performance metrics and logging verification
    - Edge cases and boundary conditions

    All tests use the testcontainers infrastructure to ensure proper database isolation
    and a realistic testing environment with actual database interactions.
    """

    @pytest.fixture(autouse=True)
    def cleanup_database(self, db_session_with_containers):
        """Clean up database before each test to ensure isolation."""
        from extensions.ext_redis import redis_client

        # Clear all test data
        db_session_with_containers.query(TenantAccountJoin).delete()
        db_session_with_containers.query(Tenant).delete()
        db_session_with_containers.query(Account).delete()
        db_session_with_containers.commit()

        # Clear Redis cache
        redis_client.flushdb()

    @pytest.fixture
    def mock_external_service_dependencies(self):
        """Mock setup for external service dependencies."""
        with (
            patch("tasks.mail_email_code_login.mail") as mock_mail,
            patch("tasks.mail_email_code_login.get_email_i18n_service") as mock_email_service,
        ):
            # Setup default mock returns
            mock_mail.is_inited.return_value = True

            # Mock email service
            mock_email_service_instance = MagicMock()
            mock_email_service_instance.send_email.return_value = None
            mock_email_service.return_value = mock_email_service_instance

            yield {
                "mail": mock_mail,
                "email_service": mock_email_service,
                "email_service_instance": mock_email_service_instance,
            }

    def _create_test_account(self, db_session_with_containers, fake=None):
        """
        Helper method to create a test account for testing.

        Args:
            db_session_with_containers: Database session from testcontainers infrastructure
            fake: Faker instance for generating test data

        Returns:
            Account: Created account instance
        """
        if fake is None:
            fake = Faker()

        # Create account
        account = Account(
            email=fake.email(),
            name=fake.name(),
            interface_language="en-US",
            status="active",
        )

        db_session_with_containers.add(account)
        db_session_with_containers.commit()

        return account

    def _create_test_tenant_and_account(self, db_session_with_containers, fake=None):
        """
        Helper method to create a test tenant and account for testing.

        Args:
            db_session_with_containers: Database session from testcontainers infrastructure
            fake: Faker instance for generating test data

        Returns:
            tuple: (Account, Tenant) created instances
        """
        if fake is None:
            fake = Faker()

        # Create account using the existing helper method
        account = self._create_test_account(db_session_with_containers, fake)

        # Create tenant
        tenant = Tenant(
            name=fake.company(),
            plan="basic",
            status="active",
        )

        db_session_with_containers.add(tenant)
        db_session_with_containers.commit()

        # Create tenant-account relationship
        tenant_account_join = TenantAccountJoin(
            tenant_id=tenant.id,
            account_id=account.id,
            role=TenantAccountRole.OWNER,
        )

        db_session_with_containers.add(tenant_account_join)
        db_session_with_containers.commit()

        return account, tenant

    def test_send_email_code_login_mail_task_success_english(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test successful email code login mail sending in English.

        This test verifies that the task can successfully:
        1. Send email code login mail with English language
        2. Use proper email service integration
        3. Pass correct template context to email service
        4. Log performance metrics correctly
        5. Complete task execution without errors
        """
        # Arrange: Setup test data
        fake = Faker()
        test_email = fake.email()
        test_code = "123456"
        test_language = "en-US"

        # Act: Execute the task
        send_email_code_login_mail_task(
            language=test_language,
            to=test_email,
            code=test_code,
        )

        # Assert: Verify expected outcomes
        mock_mail = mock_external_service_dependencies["mail"]
        mock_email_service_instance = mock_external_service_dependencies["email_service_instance"]

        # Verify mail service was checked for initialization
        mock_mail.is_inited.assert_called_once()

        # Verify email service was called with correct parameters
        mock_email_service_instance.send_email.assert_called_once_with(
            email_type=EmailType.EMAIL_CODE_LOGIN,
            language_code=test_language,
            to=test_email,
            template_context={
                "to": test_email,
                "code": test_code,
            },
        )

    def test_send_email_code_login_mail_task_success_chinese(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test successful email code login mail sending in Chinese.

        This test verifies that the task can successfully:
        1. Send email code login mail with Chinese language
        2. Handle different language codes properly
        3. Use correct template context for Chinese emails
        4. Complete task execution without errors
        """
        # Arrange: Setup test data
        fake = Faker()
        test_email = fake.email()
        test_code = "789012"
        test_language = "zh-Hans"

        # Act: Execute the task
        send_email_code_login_mail_task(
            language=test_language,
            to=test_email,
            code=test_code,
        )

        # Assert: Verify expected outcomes
        mock_email_service_instance = mock_external_service_dependencies["email_service_instance"]

        # Verify email service was called with Chinese language
        mock_email_service_instance.send_email.assert_called_once_with(
            email_type=EmailType.EMAIL_CODE_LOGIN,
            language_code=test_language,
            to=test_email,
            template_context={
                "to": test_email,
                "code": test_code,
            },
        )

    def test_send_email_code_login_mail_task_success_multiple_languages(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test successful email code login mail sending with multiple languages.

        This test verifies that the task can successfully:
        1. Handle various language codes correctly
        2. Send emails with different language configurations
        3. Maintain proper template context for each language
        4. Complete multiple task executions without conflicts
        """
        # Arrange: Setup test data
        fake = Faker()
        test_languages = ["en-US", "zh-Hans", "zh-CN", "ja-JP", "ko-KR"]
        test_emails = [fake.email() for _ in test_languages]
        test_codes = [fake.numerify("######") for _ in test_languages]

        # Act: Execute the task for each language
        for i, language in enumerate(test_languages):
            send_email_code_login_mail_task(
                language=language,
                to=test_emails[i],
                code=test_codes[i],
            )

        # Assert: Verify expected outcomes
        mock_email_service_instance = mock_external_service_dependencies["email_service_instance"]

        # Verify email service was called for each language
        assert mock_email_service_instance.send_email.call_count == len(test_languages)

        # Verify each call had correct parameters
        for i, language in enumerate(test_languages):
            call_args = mock_email_service_instance.send_email.call_args_list[i]
            assert call_args[1]["email_type"] == EmailType.EMAIL_CODE_LOGIN
            assert call_args[1]["language_code"] == language
            assert call_args[1]["to"] == test_emails[i]
            assert call_args[1]["template_context"]["code"] == test_codes[i]

    def test_send_email_code_login_mail_task_mail_not_initialized(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test email code login mail task when mail service is not initialized.

        This test verifies that the task can properly:
        1. Check mail service initialization status
        2. Return early when mail is not initialized
        3. Not attempt to send email when service is unavailable
        4. Handle gracefully without errors
        """
        # Arrange: Setup test data
        fake = Faker()
        test_email = fake.email()
        test_code = "123456"
        test_language = "en-US"

        # Mock mail service as not initialized
        mock_mail = mock_external_service_dependencies["mail"]
        mock_mail.is_inited.return_value = False

        # Act: Execute the task
        send_email_code_login_mail_task(
            language=test_language,
            to=test_email,
            code=test_code,
        )

        # Assert: Verify expected outcomes
        mock_email_service_instance = mock_external_service_dependencies["email_service_instance"]

        # Verify mail service was checked for initialization
        mock_mail.is_inited.assert_called_once()

        # Verify email service was not called
        mock_email_service_instance.send_email.assert_not_called()

    def test_send_email_code_login_mail_task_email_service_exception(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test email code login mail task when email service raises an exception.

        This test verifies that the task can properly:
        1. Handle email service exceptions gracefully
        2. Log appropriate error messages
        3. Continue execution without crashing
        4. Maintain proper error handling
        """
        # Arrange: Setup test data
        fake = Faker()
        test_email = fake.email()
        test_code = "123456"
        test_language = "en-US"

        # Mock email service to raise an exception
        mock_email_service_instance = mock_external_service_dependencies["email_service_instance"]
        mock_email_service_instance.send_email.side_effect = Exception("Email service unavailable")

        # Act: Execute the task - it should handle the exception gracefully
        send_email_code_login_mail_task(
            language=test_language,
            to=test_email,
            code=test_code,
        )

        # Assert: Verify expected outcomes
        mock_mail = mock_external_service_dependencies["mail"]
        mock_email_service_instance = mock_external_service_dependencies["email_service_instance"]

        # Verify mail service was checked for initialization
        mock_mail.is_inited.assert_called_once()

        # Verify email service was called (and failed)
        mock_email_service_instance.send_email.assert_called_once_with(
            email_type=EmailType.EMAIL_CODE_LOGIN,
            language_code=test_language,
            to=test_email,
            template_context={
                "to": test_email,
                "code": test_code,
            },
        )

    def test_send_email_code_login_mail_task_invalid_parameters(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test email code login mail task with invalid parameters.

        This test verifies that the task can properly:
        1. Handle empty or None email addresses
        2. Process empty or None verification codes
        3. Handle invalid language codes
        4. Maintain proper error handling for invalid inputs
        """
        # Arrange: Setup test data
        fake = Faker()
        test_language = "en-US"

        # Test cases for invalid parameters
        invalid_test_cases = [
            {"email": "", "code": "123456", "description": "empty email"},
            {"email": None, "code": "123456", "description": "None email"},
            {"email": fake.email(), "code": "", "description": "empty code"},
            {"email": fake.email(), "code": None, "description": "None code"},
            {"email": "invalid-email", "code": "123456", "description": "invalid email format"},
        ]

        for test_case in invalid_test_cases:
            # Reset mocks for each test case
            mock_email_service_instance = mock_external_service_dependencies["email_service_instance"]
            mock_email_service_instance.reset_mock()

            # Act: Execute the task with invalid parameters
            send_email_code_login_mail_task(
                language=test_language,
                to=test_case["email"],
                code=test_case["code"],
            )

            # Assert: Verify that email service was still called
            # The task should pass parameters to email service as-is
            # and let the email service handle validation
            mock_email_service_instance.send_email.assert_called_once()

    def test_send_email_code_login_mail_task_edge_cases(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test email code login mail task with edge cases and boundary conditions.

        This test verifies that the task can properly:
        1. Handle very long email addresses
        2. Process very long verification codes
        3. Handle special characters in parameters
        4. Process extreme language codes
        """
        # Arrange: Setup test data
        fake = Faker()
        test_language = "en-US"

        # Edge case test data
        edge_cases = [
            {
                "email": "a" * 100 + "@example.com",  # Very long email
                "code": "1" * 20,  # Very long code
                "description": "very long email and code",
            },
            {
                "email": "test+tag@example.com",  # Email with special characters
                "code": "123-456",  # Code with special characters
                "description": "special characters",
            },
            {
                "email": "test@sub.domain.example.com",  # Complex domain
                "code": "000000",  # All zeros
                "description": "complex domain and all zeros code",
            },
            {
                "email": "test@example.co.uk",  # International domain
                "code": "999999",  # All nines
                "description": "international domain and all nines code",
            },
        ]

        for test_case in edge_cases:
            # Reset mocks for each test case
            mock_email_service_instance = mock_external_service_dependencies["email_service_instance"]
            mock_email_service_instance.reset_mock()

            # Act: Execute the task with edge case data
            send_email_code_login_mail_task(
                language=test_language,
                to=test_case["email"],
                code=test_case["code"],
            )

            # Assert: Verify that email service was called with edge case data
            mock_email_service_instance.send_email.assert_called_once_with(
                email_type=EmailType.EMAIL_CODE_LOGIN,
                language_code=test_language,
                to=test_case["email"],
                template_context={
                    "to": test_case["email"],
                    "code": test_case["code"],
                },
            )

    def test_send_email_code_login_mail_task_database_integration(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test email code login mail task with database integration.

        This test verifies that the task can properly:
        1. Work with real database connections
        2. Handle database session management
        3. Maintain proper database state
        4. Complete without database-related errors
        """
        # Arrange: Setup test data with database
        fake = Faker()
        account, tenant = self._create_test_tenant_and_account(db_session_with_containers, fake)

        test_email = account.email
        test_code = "123456"
        test_language = "en-US"

        # Act: Execute the task
        send_email_code_login_mail_task(
            language=test_language,
            to=test_email,
            code=test_code,
        )

        # Assert: Verify expected outcomes
        mock_email_service_instance = mock_external_service_dependencies["email_service_instance"]

        # Verify email service was called with database account email
        mock_email_service_instance.send_email.assert_called_once_with(
            email_type=EmailType.EMAIL_CODE_LOGIN,
            language_code=test_language,
            to=test_email,
            template_context={
                "to": test_email,
                "code": test_code,
            },
        )

        # Verify database state is maintained
        db_session_with_containers.refresh(account)
        assert account.email == test_email
        assert account.status == "active"

    def test_send_email_code_login_mail_task_redis_integration(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test email code login mail task with Redis integration.

        This test verifies that the task can properly:
        1. Work with Redis cache connections
        2. Handle Redis operations without errors
        3. Maintain proper cache state
        4. Complete without Redis-related errors
        """
        # Arrange: Setup test data
        fake = Faker()
        test_email = fake.email()
        test_code = "123456"
        test_language = "en-US"

        # Setup Redis cache data
        from extensions.ext_redis import redis_client

        cache_key = f"email_code_login_test_{test_email}"
        redis_client.set(cache_key, "test_value", ex=300)

        # Act: Execute the task
        send_email_code_login_mail_task(
            language=test_language,
            to=test_email,
            code=test_code,
        )

        # Assert: Verify expected outcomes
        mock_email_service_instance = mock_external_service_dependencies["email_service_instance"]

        # Verify email service was called
        mock_email_service_instance.send_email.assert_called_once()

        # Verify Redis cache is still accessible
        assert redis_client.exists(cache_key) == 1
        assert redis_client.get(cache_key) == b"test_value"

        # Clean up Redis cache
        redis_client.delete(cache_key)

    def test_send_email_code_login_mail_task_error_handling_comprehensive(
        self, db_session_with_containers, mock_external_service_dependencies
    ):
        """
        Test comprehensive error handling for email code login mail task.

        This test verifies that the task can properly:
        1. Handle various types of exceptions
        2. Log appropriate error messages
        3. Continue execution despite errors
        4. Maintain proper error reporting
        """
        # Arrange: Setup test data
        fake = Faker()
        test_email = fake.email()
        test_code = "123456"
        test_language = "en-US"

        # Test different exception types
        exception_types = [
            ("ValueError", ValueError("Invalid email format")),
            ("RuntimeError", RuntimeError("Service unavailable")),
            ("ConnectionError", ConnectionError("Network error")),
            ("TimeoutError", TimeoutError("Request timeout")),
            ("Exception", Exception("Generic error")),
        ]

        for error_name, exception in exception_types:
            # Reset mocks for each test case
            mock_email_service_instance = mock_external_service_dependencies["email_service_instance"]
            mock_email_service_instance.reset_mock()
            mock_email_service_instance.send_email.side_effect = exception

            # Mock logging to capture error messages
            with patch("tasks.mail_email_code_login.logger") as mock_logger:
                # Act: Execute the task - it should handle the exception gracefully
                send_email_code_login_mail_task(
                    language=test_language,
                    to=test_email,
                    code=test_code,
                )

                # Assert: Verify error handling
                # Verify email service was called (and failed)
                mock_email_service_instance.send_email.assert_called_once()

                # Verify error was logged
                error_calls = [
                    call
                    for call in mock_logger.exception.call_args_list
                    if f"Send email code login mail to {test_email} failed" in str(call)
                ]
                # Check if any exception call was made (the exact message format may vary)
                assert mock_logger.exception.call_count >= 1, f"Error should be logged for {error_name}"

            # Reset side effect for next iteration
            mock_email_service_instance.send_email.side_effect = None
@@ -1,174 +1,53 @@
import pytest

from controllers.console.app.app import _validate_description_length as app_validate
from controllers.console.datasets.datasets import _validate_description_length as dataset_validate
from controllers.service_api.dataset.dataset import _validate_description_length as service_dataset_validate
from libs.validators import validate_description_length


class TestDescriptionValidationUnit:
    """Unit tests for description validation functions in App and Dataset APIs"""
    """Unit tests for the centralized description validation function."""

    def test_app_validate_description_length_valid(self):
        """Test App validation function with valid descriptions"""
    def test_validate_description_length_valid(self):
        """Test validation function with valid descriptions."""
        # Empty string should be valid
        assert app_validate("") == ""
        assert validate_description_length("") == ""

        # None should be valid
        assert app_validate(None) is None
        assert validate_description_length(None) is None

        # Short description should be valid
        short_desc = "Short description"
        assert app_validate(short_desc) == short_desc
        assert validate_description_length(short_desc) == short_desc

        # Exactly 400 characters should be valid
        exactly_400 = "x" * 400
        assert app_validate(exactly_400) == exactly_400
        assert validate_description_length(exactly_400) == exactly_400

        # Just under limit should be valid
        just_under = "x" * 399
        assert app_validate(just_under) == just_under
        assert validate_description_length(just_under) == just_under

    def test_app_validate_description_length_invalid(self):
        """Test App validation function with invalid descriptions"""
    def test_validate_description_length_invalid(self):
        """Test validation function with invalid descriptions."""
        # 401 characters should fail
        just_over = "x" * 401
        with pytest.raises(ValueError) as exc_info:
            app_validate(just_over)
            validate_description_length(just_over)
        assert "Description cannot exceed 400 characters." in str(exc_info.value)

        # 500 characters should fail
        way_over = "x" * 500
        with pytest.raises(ValueError) as exc_info:
            app_validate(way_over)
            validate_description_length(way_over)
        assert "Description cannot exceed 400 characters." in str(exc_info.value)

        # 1000 characters should fail
        very_long = "x" * 1000
        with pytest.raises(ValueError) as exc_info:
            app_validate(very_long)
            validate_description_length(very_long)
        assert "Description cannot exceed 400 characters." in str(exc_info.value)

    def test_dataset_validate_description_length_valid(self):
        """Test Dataset validation function with valid descriptions"""
        # Empty string should be valid
        assert dataset_validate("") == ""

        # Short description should be valid
        short_desc = "Short description"
        assert dataset_validate(short_desc) == short_desc

        # Exactly 400 characters should be valid
        exactly_400 = "x" * 400
        assert dataset_validate(exactly_400) == exactly_400

        # Just under limit should be valid
        just_under = "x" * 399
        assert dataset_validate(just_under) == just_under

    def test_dataset_validate_description_length_invalid(self):
        """Test Dataset validation function with invalid descriptions"""
        # 401 characters should fail
        just_over = "x" * 401
        with pytest.raises(ValueError) as exc_info:
            dataset_validate(just_over)
        assert "Description cannot exceed 400 characters." in str(exc_info.value)

        # 500 characters should fail
        way_over = "x" * 500
        with pytest.raises(ValueError) as exc_info:
            dataset_validate(way_over)
        assert "Description cannot exceed 400 characters." in str(exc_info.value)

    def test_service_dataset_validate_description_length_valid(self):
        """Test Service Dataset validation function with valid descriptions"""
        # Empty string should be valid
        assert service_dataset_validate("") == ""

        # None should be valid
        assert service_dataset_validate(None) is None

        # Short description should be valid
        short_desc = "Short description"
        assert service_dataset_validate(short_desc) == short_desc

        # Exactly 400 characters should be valid
        exactly_400 = "x" * 400
        assert service_dataset_validate(exactly_400) == exactly_400

        # Just under limit should be valid
        just_under = "x" * 399
        assert service_dataset_validate(just_under) == just_under

    def test_service_dataset_validate_description_length_invalid(self):
        """Test Service Dataset validation function with invalid descriptions"""
        # 401 characters should fail
        just_over = "x" * 401
        with pytest.raises(ValueError) as exc_info:
            service_dataset_validate(just_over)
        assert "Description cannot exceed 400 characters." in str(exc_info.value)

        # 500 characters should fail
        way_over = "x" * 500
        with pytest.raises(ValueError) as exc_info:
            service_dataset_validate(way_over)
        assert "Description cannot exceed 400 characters." in str(exc_info.value)

    def test_app_dataset_validation_consistency(self):
        """Test that App and Dataset validation functions behave identically"""
        test_cases = [
            "",  # Empty string
            "Short description",  # Normal description
            "x" * 100,  # Medium description
            "x" * 400,  # Exactly at limit
        ]

        # Test valid cases produce same results
        for test_desc in test_cases:
            assert app_validate(test_desc) == dataset_validate(test_desc) == service_dataset_validate(test_desc)

        # Test invalid cases produce same errors
        invalid_cases = [
            "x" * 401,  # Just over limit
            "x" * 500,  # Way over limit
            "x" * 1000,  # Very long
        ]

        for invalid_desc in invalid_cases:
            app_error = None
            dataset_error = None
            service_dataset_error = None

            # Capture App validation error
            try:
                app_validate(invalid_desc)
            except ValueError as e:
                app_error = str(e)

            # Capture Dataset validation error
            try:
                dataset_validate(invalid_desc)
            except ValueError as e:
                dataset_error = str(e)

            # Capture Service Dataset validation error
            try:
                service_dataset_validate(invalid_desc)
            except ValueError as e:
                service_dataset_error = str(e)

            # All should produce errors
            assert app_error is not None, f"App validation should fail for {len(invalid_desc)} characters"
            assert dataset_error is not None, f"Dataset validation should fail for {len(invalid_desc)} characters"
            error_msg = f"Service Dataset validation should fail for {len(invalid_desc)} characters"
            assert service_dataset_error is not None, error_msg

            # Errors should be identical
            error_msg = f"Error messages should be identical for {len(invalid_desc)} characters"
            assert app_error == dataset_error == service_dataset_error, error_msg
            assert app_error == "Description cannot exceed 400 characters."

    def test_boundary_values(self):
        """Test boundary values around the 400 character limit"""
        """Test boundary values around the 400 character limit."""
        boundary_tests = [
            (0, True),  # Empty
            (1, True),  # Minimum
@@ -184,69 +63,45 @@ class TestDescriptionValidationUnit:

        if should_pass:
            # Should not raise exception
            assert app_validate(test_desc) == test_desc
            assert dataset_validate(test_desc) == test_desc
            assert service_dataset_validate(test_desc) == test_desc
            assert validate_description_length(test_desc) == test_desc
        else:
            # Should raise ValueError
            with pytest.raises(ValueError):
                app_validate(test_desc)
            with pytest.raises(ValueError):
                dataset_validate(test_desc)
            with pytest.raises(ValueError):
                service_dataset_validate(test_desc)
                validate_description_length(test_desc)

    def test_special_characters(self):
        """Test validation with special characters, Unicode, etc."""
        # Unicode characters
        unicode_desc = "测试描述" * 100  # Chinese characters
        if len(unicode_desc) <= 400:
            assert app_validate(unicode_desc) == unicode_desc
            assert dataset_validate(unicode_desc) == unicode_desc
            assert service_dataset_validate(unicode_desc) == unicode_desc
            assert validate_description_length(unicode_desc) == unicode_desc

        # Special characters
        special_desc = "Special chars: !@#$%^&*()_+-=[]{}|;':\",./<>?" * 10
        if len(special_desc) <= 400:
            assert app_validate(special_desc) == special_desc
            assert dataset_validate(special_desc) == special_desc
            assert service_dataset_validate(special_desc) == special_desc
            assert validate_description_length(special_desc) == special_desc

        # Mixed content
        mixed_desc = "Mixed content: 测试 123 !@# " * 15
        if len(mixed_desc) <= 400:
            assert app_validate(mixed_desc) == mixed_desc
            assert dataset_validate(mixed_desc) == mixed_desc
            assert service_dataset_validate(mixed_desc) == mixed_desc
            assert validate_description_length(mixed_desc) == mixed_desc
        elif len(mixed_desc) > 400:
            with pytest.raises(ValueError):
                app_validate(mixed_desc)
            with pytest.raises(ValueError):
                dataset_validate(mixed_desc)
            with pytest.raises(ValueError):
                service_dataset_validate(mixed_desc)
                validate_description_length(mixed_desc)

    def test_whitespace_handling(self):
        """Test validation with various whitespace scenarios"""
        """Test validation with various whitespace scenarios."""
        # Leading/trailing whitespace
        whitespace_desc = " Description with whitespace "
        if len(whitespace_desc) <= 400:
            assert app_validate(whitespace_desc) == whitespace_desc
            assert dataset_validate(whitespace_desc) == whitespace_desc
            assert service_dataset_validate(whitespace_desc) == whitespace_desc
            assert validate_description_length(whitespace_desc) == whitespace_desc

        # Newlines and tabs
        multiline_desc = "Line 1\nLine 2\tTabbed content"
        if len(multiline_desc) <= 400:
            assert app_validate(multiline_desc) == multiline_desc
            assert dataset_validate(multiline_desc) == multiline_desc
            assert service_dataset_validate(multiline_desc) == multiline_desc
            assert validate_description_length(multiline_desc) == multiline_desc

        # Only whitespace over limit
        only_spaces = " " * 401
        with pytest.raises(ValueError):
            app_validate(only_spaces)
        with pytest.raises(ValueError):
            dataset_validate(only_spaces)
        with pytest.raises(ValueError):
            service_dataset_validate(only_spaces)
            validate_description_length(only_spaces)
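The contract these tests enforce is small enough to state directly. Below is a hypothetical sketch of the shared validator that the new `libs.validators` import points at, inferred purely from the assertions above (None and empty strings pass through unchanged, exactly 400 characters pass, anything longer raises); the real `libs/validators.py` may differ in details.

```python
# Sketch inferred from the test assertions; not the repository's actual code.
def validate_description_length(description: str | None) -> str | None:
    """Pass through None/short values; reject descriptions over 400 characters."""
    if description and len(description) > 400:
        raise ValueError("Description cannot exceed 400 characters.")
    return description
```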
@@ -172,73 +172,31 @@ class TestSupabaseStorage:
        assert "test-bucket" in [call[0][0] for call in mock_client.storage.from_.call_args_list if call[0]]
        mock_client.storage.from_().download.assert_called_with("test.txt")

    def test_exists_with_list_containing_items(self, storage_with_mock_client):
        """Test exists returns True when list() returns items (using len() > 0)."""
    def test_exists_returns_true_when_file_found(self, storage_with_mock_client):
        """Test exists returns True when list() returns items."""
        storage, mock_client = storage_with_mock_client

        # Mock list return with special object that has count() method
        mock_list_result = Mock()
        mock_list_result.count.return_value = 1
        mock_client.storage.from_().list.return_value = mock_list_result
        mock_client.storage.from_().list.return_value = [{"name": "test.txt"}]

        result = storage.exists("test.txt")

        assert result is True
        # from_ gets called during init too, so just check it was called with the right bucket
        assert "test-bucket" in [call[0][0] for call in mock_client.storage.from_.call_args_list if call[0]]
        mock_client.storage.from_().list.assert_called_with("test.txt")
        mock_client.storage.from_().list.assert_called_with(path="test.txt")

    def test_exists_with_count_method_greater_than_zero(self, storage_with_mock_client):
        """Test exists returns True when list result has count() > 0."""
    def test_exists_returns_false_when_file_not_found(self, storage_with_mock_client):
        """Test exists returns False when list() returns an empty list."""
        storage, mock_client = storage_with_mock_client

        # Mock list return with count() method
        mock_list_result = Mock()
        mock_list_result.count.return_value = 1
        mock_client.storage.from_().list.return_value = mock_list_result

        result = storage.exists("test.txt")

        assert result is True
        # Verify the correct calls were made
        assert "test-bucket" in [call[0][0] for call in mock_client.storage.from_.call_args_list if call[0]]
        mock_client.storage.from_().list.assert_called_with("test.txt")
        mock_list_result.count.assert_called()

    def test_exists_with_count_method_zero(self, storage_with_mock_client):
        """Test exists returns False when list result has count() == 0."""
        storage, mock_client = storage_with_mock_client

        # Mock list return with count() method returning 0
        mock_list_result = Mock()
        mock_list_result.count.return_value = 0
        mock_client.storage.from_().list.return_value = mock_list_result
        mock_client.storage.from_().list.return_value = []

        result = storage.exists("test.txt")

        assert result is False
        # Verify the correct calls were made
        assert "test-bucket" in [call[0][0] for call in mock_client.storage.from_.call_args_list if call[0]]
        mock_client.storage.from_().list.assert_called_with("test.txt")
        mock_list_result.count.assert_called()
        mock_client.storage.from_().list.assert_called_with(path="test.txt")

    def test_exists_with_empty_list(self, storage_with_mock_client):
        """Test exists returns False when list() returns empty list."""
        storage, mock_client = storage_with_mock_client

        # Mock list return with special object that has count() method returning 0
        mock_list_result = Mock()
        mock_list_result.count.return_value = 0
        mock_client.storage.from_().list.return_value = mock_list_result

        result = storage.exists("test.txt")

        assert result is False
        # Verify the correct calls were made
        assert "test-bucket" in [call[0][0] for call in mock_client.storage.from_.call_args_list if call[0]]
        mock_client.storage.from_().list.assert_called_with("test.txt")

    def test_delete_calls_remove_with_filename(self, storage_with_mock_client):
    def test_delete_calls_remove_with_filename_in_list(self, storage_with_mock_client):
        """Test delete calls remove([...]) (some client versions require a list)."""
        storage, mock_client = storage_with_mock_client

@@ -247,7 +205,7 @@ class TestSupabaseStorage:
        storage.delete(filename)

        mock_client.storage.from_.assert_called_once_with("test-bucket")
        mock_client.storage.from_().remove.assert_called_once_with(filename)
        mock_client.storage.from_().remove.assert_called_once_with([filename])

    def test_bucket_exists_returns_true_when_bucket_found(self):
        """Test bucket_exists returns True when bucket is found in list."""
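The renamed tests imply the storage implementation moved from a `count()`-based check to plain list truthiness, and to passing `remove()` a list of keys. A hypothetical excerpt consistent with the new assertions follows; the class shape and attribute names are assumed, not copied from `extensions/storage`.

```python
# Hypothetical excerpt mirroring the exists()/delete() behavior asserted above.
class SupabaseStorageSketch:
    def __init__(self, client, bucket_name: str):
        self.client = client
        self.bucket_name = bucket_name

    def exists(self, filename: str) -> bool:
        # list(path=...) returns a list of entry dicts; an empty list means "missing"
        files = self.client.storage.from_(self.bucket_name).list(path=filename)
        return len(files) > 0

    def delete(self, filename: str) -> None:
        # newer supabase-py clients expect a list of keys, hence remove([filename])
        self.client.storage.from_(self.bucket_name).remove([filename])
```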
@@ -1,3 +1,5 @@
from unittest.mock import patch

import pytest
from tos import TosClientV2  # type: ignore

@@ -13,7 +15,13 @@ class TestVolcengineTos(BaseStorageTest):
    @pytest.fixture(autouse=True)
    def setup_method(self, setup_volcengine_tos_mock):
        """Executed before each test method."""
        self.storage = VolcengineTosStorage()
        with patch("extensions.storage.volcengine_tos_storage.dify_config") as mock_config:
            mock_config.VOLCENGINE_TOS_ACCESS_KEY = "test_access_key"
            mock_config.VOLCENGINE_TOS_SECRET_KEY = "test_secret_key"
            mock_config.VOLCENGINE_TOS_ENDPOINT = "test_endpoint"
            mock_config.VOLCENGINE_TOS_REGION = "test_region"
            self.storage = VolcengineTosStorage()

        self.storage.bucket_name = get_example_bucket()
        self.storage.client = TosClientV2(
            ak="dify",
2
api/uv.lock
generated
2
api/uv.lock
generated
@@ -1276,7 +1276,7 @@ wheels = [

[[package]]
name = "dify-api"
version = "1.9.0"
version = "1.9.1"
source = { virtual = "." }
dependencies = [
    { name = "arize-phoenix-otel" },

@@ -2,7 +2,7 @@ x-shared-env: &shared-api-worker-env
services:
  # API service
  api:
    image: langgenius/dify-api:1.9.0
    image: langgenius/dify-api:1.9.1
    restart: always
    environment:
      # Use the shared environment variables.
@@ -31,7 +31,7 @@ services:
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:1.9.0
    image: langgenius/dify-api:1.9.1
    restart: always
    environment:
      # Use the shared environment variables.
@@ -58,7 +58,7 @@ services:
  # worker_beat service
  # Celery beat for scheduling periodic tasks.
  worker_beat:
    image: langgenius/dify-api:1.9.0
    image: langgenius/dify-api:1.9.1
    restart: always
    environment:
      # Use the shared environment variables.
@@ -76,7 +76,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:1.9.0
    image: langgenius/dify-web:1.9.1
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}

@@ -604,7 +604,7 @@ x-shared-env: &shared-api-worker-env
services:
  # API service
  api:
    image: langgenius/dify-api:1.9.0
    image: langgenius/dify-api:1.9.1
    restart: always
    environment:
      # Use the shared environment variables.
@@ -633,7 +633,7 @@ services:
  # worker service
  # The Celery worker for processing the queue.
  worker:
    image: langgenius/dify-api:1.9.0
    image: langgenius/dify-api:1.9.1
    restart: always
    environment:
      # Use the shared environment variables.
@@ -660,7 +660,7 @@ services:
  # worker_beat service
  # Celery beat for scheduling periodic tasks.
  worker_beat:
    image: langgenius/dify-api:1.9.0
    image: langgenius/dify-api:1.9.1
    restart: always
    environment:
      # Use the shared environment variables.
@@ -678,7 +678,7 @@ services:

  # Frontend web application.
  web:
    image: langgenius/dify-web:1.9.0
    image: langgenius/dify-web:1.9.1
    restart: always
    environment:
      CONSOLE_API_URL: ${CONSOLE_API_URL:-}
@@ -1,4 +1,4 @@
|
||||

|
||||

|
||||
|
||||
<p align="center">
|
||||
<a href="https://cloud.dify.ai">Dify Cloud</a> ·
|
||||
@@ -35,17 +35,19 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
|
||||
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
|
||||
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
|
||||
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
|
||||
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
|
||||
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
|
||||
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
|
||||
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
|
||||
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
|
||||
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
|
||||
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
|
||||
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
|
||||
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
|
||||
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
|
||||
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
|
||||
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
|
||||
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
|
||||
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
|
||||
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
|
||||
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
|
||||
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
|
||||
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
|
||||
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
|
||||
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
|
||||
</p>
|
||||
|
||||
<div style="text-align: right;">
|
||||
@@ -97,7 +99,7 @@
|
||||
|
||||
</br>
|
||||
|
||||
أسهل طريقة لبدء تشغيل خادم Dify هي تشغيل ملف [docker-compose.yml](docker/docker-compose.yaml) الخاص بنا. قبل تشغيل أمر التثبيت، تأكد من تثبيت [Docker](https://docs.docker.com/get-docker/) و [Docker Compose](https://docs.docker.com/compose/install/) على جهازك:
|
||||
أسهل طريقة لبدء تشغيل خادم Dify هي تشغيل ملف [docker-compose.yml](../../docker/docker-compose.yaml) الخاص بنا. قبل تشغيل أمر التثبيت، تأكد من تثبيت [Docker](https://docs.docker.com/get-docker/) و [Docker Compose](https://docs.docker.com/compose/install/) على جهازك:
|
||||
|
||||
```bash
|
||||
cd docker
|
||||
@@ -111,7 +113,7 @@ docker compose up -d
|
||||
|
||||
## الخطوات التالية
|
||||
|
||||
إذا كنت بحاجة إلى تخصيص الإعدادات، فيرجى الرجوع إلى التعليقات في ملف [.env.example](docker/.env.example) وتحديث القيم المقابلة في ملف `.env`. بالإضافة إلى ذلك، قد تحتاج إلى إجراء تعديلات على ملف `docker-compose.yaml` نفسه، مثل تغيير إصدارات الصور أو تعيينات المنافذ أو نقاط تحميل وحدات التخزين، بناءً على بيئة النشر ومتطلباتك الخاصة. بعد إجراء أي تغييرات، يرجى إعادة تشغيل `docker-compose up -d`. يمكنك العثور على قائمة كاملة بمتغيرات البيئة المتاحة [هنا](https://docs.dify.ai/getting-started/install-self-hosted/environments).
|
||||
إذا كنت بحاجة إلى تخصيص الإعدادات، فيرجى الرجوع إلى التعليقات في ملف [.env.example](../../docker/.env.example) وتحديث القيم المقابلة في ملف `.env`. بالإضافة إلى ذلك، قد تحتاج إلى إجراء تعديلات على ملف `docker-compose.yaml` نفسه، مثل تغيير إصدارات الصور أو تعيينات المنافذ أو نقاط تحميل وحدات التخزين، بناءً على بيئة النشر ومتطلباتك الخاصة. بعد إجراء أي تغييرات، يرجى إعادة تشغيل `docker-compose up -d`. يمكنك العثور على قائمة كاملة بمتغيرات البيئة المتاحة [هنا](https://docs.dify.ai/getting-started/install-self-hosted/environments).
|
||||
|
||||
يوجد مجتمع خاص بـ [Helm Charts](https://helm.sh/) وملفات YAML التي تسمح بتنفيذ Dify على Kubernetes للنظام من الإيجابيات العلوية.
|
||||
|
||||
@@ -185,12 +187,4 @@ docker compose up -d

## الرخصة

هذا المستودع متاح تحت [رخصة البرنامج الحر Dify](../LICENSE)، والتي تعتبر بشكل أساسي Apache 2.0 مع بعض القيود الإضافية.

## الكشف عن الأمان

لحماية خصوصيتك، يرجى تجنب نشر مشكلات الأمان على GitHub. بدلاً من ذلك، أرسل أسئلتك إلى <security@dify.ai> وسنقدم لك إجابة أكثر تفصيلاً.

## الرخصة

هذا المستودع متاح تحت [رخصة البرنامج الحر Dify](../LICENSE)، والتي تعتبر بشكل أساسي Apache 2.0 مع بعض القيود الإضافية.
هذا المستودع متاح تحت [رخصة البرنامج الحر Dify](../../LICENSE)، والتي تعتبر بشكل أساسي Apache 2.0 مع بعض القيود الإضافية.
@@ -1,4 +1,4 @@
![cover-v5-optimized](./images/GitHub_README_if.png)
![cover-v5-optimized](../images/GitHub_README_if.png)

<p align="center">
📌 <a href="https://dify.ai/blog/introducing-dify-workflow-file-upload-a-demo-on-ai-podcast">ডিফাই ওয়ার্কফ্লো ফাইল আপলোড পরিচিতি: গুগল নোটবুক-এলএম পডকাস্ট পুনর্নির্মাণ</a>
@@ -39,18 +39,19 @@
</p>

<p align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_DE.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

ডিফাই একটি ওপেন-সোর্স LLM অ্যাপ ডেভেলপমেন্ট প্ল্যাটফর্ম। এটি ইন্টুইটিভ ইন্টারফেস, এজেন্টিক AI ওয়ার্কফ্লো, RAG পাইপলাইন, এজেন্ট ক্যাপাবিলিটি, মডেল ম্যানেজমেন্ট, মনিটরিং সুবিধা এবং আরও অনেক কিছু একত্রিত করে, যা দ্রুত প্রোটোটাইপ থেকে প্রোডাকশন পর্যন্ত নিয়ে যেতে সহায়তা করে।
@@ -64,7 +65,7 @@

</br>

ডিফাই সার্ভার চালু করার সবচেয়ে সহজ উপায় [docker compose](docker/docker-compose.yaml) মাধ্যমে। নিম্নলিখিত কমান্ডগুলো ব্যবহার করে ডিফাই চালানোর আগে, নিশ্চিত করুন যে আপনার মেশিনে [Docker](https://docs.docker.com/get-docker/) এবং [Docker Compose](https://docs.docker.com/compose/install/) ইনস্টল করা আছে :
ডিফাই সার্ভার চালু করার সবচেয়ে সহজ উপায় [docker compose](../../docker/docker-compose.yaml) মাধ্যমে। নিম্নলিখিত কমান্ডগুলো ব্যবহার করে ডিফাই চালানোর আগে, নিশ্চিত করুন যে আপনার মেশিনে [Docker](https://docs.docker.com/get-docker/) এবং [Docker Compose](https://docs.docker.com/compose/install/) ইনস্টল করা আছে :

```bash
cd dify
@@ -128,7 +129,7 @@ GitHub-এ ডিফাইকে স্টার দিয়ে রাখুন

## Advanced Setup

যদি আপনার কনফিগারেশনটি কাস্টমাইজ করার প্রয়োজন হয়, তাহলে অনুগ্রহ করে আমাদের [.env.example](docker/.env.example) ফাইল দেখুন এবং আপনার `.env` ফাইলে সংশ্লিষ্ট মানগুলি আপডেট করুন। এছাড়াও, আপনার নির্দিষ্ট এনভায়রনমেন্ট এবং প্রয়োজনীয়তার উপর ভিত্তি করে আপনাকে `docker-compose.yaml` ফাইলে সমন্বয় করতে হতে পারে, যেমন ইমেজ ভার্সন পরিবর্তন করা, পোর্ট ম্যাপিং করা, অথবা ভলিউম মাউন্ট করা।
যদি আপনার কনফিগারেশনটি কাস্টমাইজ করার প্রয়োজন হয়, তাহলে অনুগ্রহ করে আমাদের [.env.example](../../docker/.env.example) ফাইল দেখুন এবং আপনার `.env` ফাইলে সংশ্লিষ্ট মানগুলি আপডেট করুন। এছাড়াও, আপনার নির্দিষ্ট এনভায়রনমেন্ট এবং প্রয়োজনীয়তার উপর ভিত্তি করে আপনাকে `docker-compose.yaml` ফাইলে সমন্বয় করতে হতে পারে, যেমন ইমেজ ভার্সন পরিবর্তন করা, পোর্ট ম্যাপিং করা, অথবা ভলিউম মাউন্ট করা।
যেকোনো পরিবর্তন করার পর, অনুগ্রহ করে `docker-compose up -d` পুনরায় চালান। ভেরিয়েবলের সম্পূর্ণ তালিকা [এখানে](https://docs.dify.ai/getting-started/install-self-hosted/environments) খুঁজে পেতে পারেন।
যদি আপনি একটি হাইলি এভেইলেবল সেটআপ কনফিগার করতে চান, তাহলে কমিউনিটি [Helm Charts](https://helm.sh/) এবং YAML ফাইল রয়েছে যা Dify কে Kubernetes-এ ডিপ্লয় করার প্রক্রিয়া বর্ণনা করে।
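
As a hedged companion to the restart step above, one way to verify the stack afterwards (the service name `api` is an assumption about Dify's compose file, not something this diff states):

```bash
docker compose ps            # every service should report a running/healthy state
docker compose logs -f api   # follow one service's logs while debugging startup
```
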
@@ -175,7 +176,7 @@ GitHub-এ ডিফাইকে স্টার দিয়ে রাখুন

## Contributing

যারা কোড অবদান রাখতে চান, তাদের জন্য আমাদের [অবদান নির্দেশিকা] দেখুন (https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)।
যারা কোড অবদান রাখতে চান, তাদের জন্য আমাদের [অবদান নির্দেশিকা](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) দেখুন।
একই সাথে, সোশ্যাল মিডিয়া এবং ইভেন্ট এবং কনফারেন্সে এটি শেয়ার করে Dify কে সমর্থন করুন।

> আমরা ম্যান্ডারিন বা ইংরেজি ছাড়া অন্য ভাষায় Dify অনুবাদ করতে সাহায্য করার জন্য অবদানকারীদের খুঁজছি। আপনি যদি সাহায্য করতে আগ্রহী হন, তাহলে আরও তথ্যের জন্য [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) দেখুন এবং আমাদের [ডিসকর্ড কমিউনিটি সার্ভার](https://discord.gg/8Tpq4AcN9c) এর `গ্লোবাল-ইউজারস` চ্যানেলে আমাদের একটি মন্তব্য করুন।
@@ -203,4 +204,4 @@ GitHub-এ ডিফাইকে স্টার দিয়ে রাখুন

## লাইসেন্স

এই রিপোজিটরিটি [ডিফাই ওপেন সোর্স লাইসেন্স](../LICENSE) এর অধিনে , যা মূলত অ্যাপাচি ২.০, তবে কিছু অতিরিক্ত বিধিনিষেধ রয়েছে।
এই রিপোজিটরিটি [ডিফাই ওপেন সোর্স লাইসেন্স](../../LICENSE) এর অধিনে , যা মূলত অ্যাপাচি ২.০, তবে কিছু অতিরিক্ত বিধিনিষেধ রয়েছে।
@@ -6,7 +6,7 @@ Wir müssen wendig sein und schnell liefern, aber wir möchten auch sicherstelle

Dieser Leitfaden ist, wie Dify selbst, in ständiger Entwicklung. Wir sind dankbar für Ihr Verständnis, falls er manchmal hinter dem eigentlichen Projekt zurückbleibt, und begrüßen jedes Feedback zur Verbesserung.

Bitte nehmen Sie sich einen Moment Zeit, um unsere [Lizenz- und Mitwirkungsvereinbarung](../LICENSE) zu lesen. Die Community hält sich außerdem an den [Verhaltenskodex](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).
Bitte nehmen Sie sich einen Moment Zeit, um unsere [Lizenz- und Mitwirkungsvereinbarung](../../LICENSE) zu lesen. Die Community hält sich außerdem an den [Verhaltenskodex](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).

## Bevor Sie loslegen

@@ -1,4 +1,4 @@
![cover-v5-optimized](./images/GitHub_README_if.png)
![cover-v5-optimized](../images/GitHub_README_if.png)

<p align="center">
📌 <a href="https://dify.ai/blog/introducing-dify-workflow-file-upload-a-demo-on-ai-podcast">Einführung in Dify Workflow File Upload: Google NotebookLM Podcast nachbilden</a>
@@ -39,18 +39,19 @@
</p>

<p align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_DE.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

Dify ist eine Open-Source-Plattform zur Entwicklung von LLM-Anwendungen. Ihre intuitive Benutzeroberfläche vereint agentenbasierte KI-Workflows, RAG-Pipelines, Agentenfunktionen, Modellverwaltung, Überwachungsfunktionen und mehr, sodass Sie schnell von einem Prototyp in die Produktion übergehen können.
@@ -64,7 +65,7 @@ Dify ist eine Open-Source-Plattform zur Entwicklung von LLM-Anwendungen. Ihre in

</br>

Der einfachste Weg, den Dify-Server zu starten, ist über [docker compose](docker/docker-compose.yaml). Stellen Sie vor dem Ausführen von Dify mit den folgenden Befehlen sicher, dass [Docker](https://docs.docker.com/get-docker/) und [Docker Compose](https://docs.docker.com/compose/install/) auf Ihrem System installiert sind:
Der einfachste Weg, den Dify-Server zu starten, ist über [docker compose](../../docker/docker-compose.yaml). Stellen Sie vor dem Ausführen von Dify mit den folgenden Befehlen sicher, dass [Docker](https://docs.docker.com/get-docker/) und [Docker Compose](https://docs.docker.com/compose/install/) auf Ihrem System installiert sind:

```bash
cd dify
@@ -127,7 +128,7 @@ Star Dify auf GitHub und lassen Sie sich sofort über neue Releases benachrichti

## Erweiterte Einstellungen

Falls Sie die Konfiguration anpassen müssen, lesen Sie bitte die Kommentare in unserer [.env.example](docker/.env.example)-Datei und aktualisieren Sie die entsprechenden Werte in Ihrer `.env`-Datei. Zusätzlich müssen Sie eventuell Anpassungen an der `docker-compose.yaml`-Datei vornehmen, wie zum Beispiel das Ändern von Image-Versionen, Portzuordnungen oder Volumen-Mounts, je nach Ihrer spezifischen Einsatzumgebung und Ihren Anforderungen. Nachdem Sie Änderungen vorgenommen haben, starten Sie `docker-compose up -d` erneut. Eine vollständige Liste der verfügbaren Umgebungsvariablen finden Sie [hier](https://docs.dify.ai/getting-started/install-self-hosted/environments).
Falls Sie die Konfiguration anpassen müssen, lesen Sie bitte die Kommentare in unserer [.env.example](../../docker/.env.example)-Datei und aktualisieren Sie die entsprechenden Werte in Ihrer `.env`-Datei. Zusätzlich müssen Sie eventuell Anpassungen an der `docker-compose.yaml`-Datei vornehmen, wie zum Beispiel das Ändern von Image-Versionen, Portzuordnungen oder Volumen-Mounts, je nach Ihrer spezifischen Einsatzumgebung und Ihren Anforderungen. Nachdem Sie Änderungen vorgenommen haben, starten Sie `docker-compose up -d` erneut. Eine vollständige Liste der verfügbaren Umgebungsvariablen finden Sie [hier](https://docs.dify.ai/getting-started/install-self-hosted/environments).

Falls Sie eine hochverfügbare Konfiguration einrichten möchten, gibt es von der Community bereitgestellte [Helm Charts](https://helm.sh/) und YAML-Dateien, die es ermöglichen, Dify auf Kubernetes bereitzustellen.
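
A hedged sketch of that Kubernetes route via the community `dify-helm` chart by @BorisPolonsky (repository URL and chart name are assumptions, not taken from this diff):

```bash
# Assumed community Helm chart; verify the repository URL before relying on it.
helm repo add dify https://borispolonsky.github.io/dify-helm
helm repo update
helm install my-dify dify/dify --namespace dify --create-namespace
```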

@@ -173,14 +174,14 @@ Stellen Sie Dify mit einem Klick in AKS bereit, indem Sie [Azure Devops Pipeline

## Contributing

Falls Sie Code beitragen möchten, lesen Sie bitte unseren [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING/CONTRIBUTING_DE.md). Gleichzeitig bitten wir Sie, Dify zu unterstützen, indem Sie es in den sozialen Medien teilen und auf Veranstaltungen und Konferenzen präsentieren.
Falls Sie Code beitragen möchten, lesen Sie bitte unseren [Contribution Guide](./CONTRIBUTING.md). Gleichzeitig bitten wir Sie, Dify zu unterstützen, indem Sie es in den sozialen Medien teilen und auf Veranstaltungen und Konferenzen präsentieren.

> Wir suchen Mitwirkende, die dabei helfen, Dify in weitere Sprachen zu übersetzen – außer Mandarin oder Englisch. Wenn Sie Interesse an einer Mitarbeit haben, lesen Sie bitte die [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) für weitere Informationen und hinterlassen Sie einen Kommentar im `global-users`-Kanal unseres [Discord Community Servers](https://discord.gg/8Tpq4AcN9c).

## Gemeinschaft & Kontakt

- [GitHub Discussion](https://github.com/langgenius/dify/discussions). Am besten geeignet für: den Austausch von Feedback und das Stellen von Fragen.
- [GitHub Issues](https://github.com/langgenius/dify/issues). Am besten für: Fehler, auf die Sie bei der Verwendung von Dify.AI stoßen, und Funktionsvorschläge. Siehe unseren [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
- [GitHub Issues](https://github.com/langgenius/dify/issues). Am besten für: Fehler, auf die Sie bei der Verwendung von Dify.AI stoßen, und Funktionsvorschläge. Siehe unseren [Contribution Guide](./CONTRIBUTING.md).
- [Discord](https://discord.gg/FngNHpbcY7). Am besten geeignet für: das Teilen von Anwendungen und den Austausch mit der Community.
- [X(Twitter)](https://twitter.com/dify_ai). Am besten geeignet für: das Teilen von Anwendungen und den Austausch mit der Community.

@@ -200,4 +201,4 @@ Um Ihre Privatsphäre zu schützen, vermeiden Sie es bitte, Sicherheitsprobleme

## Lizenz

Dieses Repository steht unter der [Dify Open Source License](../LICENSE), die im Wesentlichen Apache 2.0 mit einigen zusätzlichen Einschränkungen ist.
Dieses Repository steht unter der [Dify Open Source License](../../LICENSE), die im Wesentlichen Apache 2.0 mit einigen zusätzlichen Einschränkungen ist.
@@ -6,7 +6,7 @@ Necesitamos ser ágiles y enviar rápidamente dado donde estamos, pero también

Esta guía, como Dify mismo, es un trabajo en constante progreso. Agradecemos mucho tu comprensión si a veces se queda atrás del proyecto real, y damos la bienvenida a cualquier comentario para que podamos mejorar.

En términos de licencia, por favor tómate un minuto para leer nuestro breve [Acuerdo de Licencia y Colaborador](../LICENSE). La comunidad también se adhiere al [código de conducta](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).
En términos de licencia, por favor tómate un minuto para leer nuestro breve [Acuerdo de Licencia y Colaborador](../../LICENSE). La comunidad también se adhiere al [código de conducta](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).

## Antes de empezar

@@ -1,4 +1,4 @@
![cover-v5-optimized](./images/GitHub_README_if.png)
![cover-v5-optimized](../images/GitHub_README_if.png)

<p align="center">
<a href="https://cloud.dify.ai">Dify Cloud</a> ·
@@ -35,17 +35,19 @@
</p>

<p align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

#
@@ -108,7 +110,7 @@ Dale estrella a Dify en GitHub y serás notificado instantáneamente de las nuev

</br>

La forma más fácil de iniciar el servidor de Dify es ejecutar nuestro archivo [docker-compose.yml](docker/docker-compose.yaml). Antes de ejecutar el comando de instalación, asegúrate de que [Docker](https://docs.docker.com/get-docker/) y [Docker Compose](https://docs.docker.com/compose/install/) estén instalados en tu máquina:
La forma más fácil de iniciar el servidor de Dify es ejecutar nuestro archivo [docker-compose.yml](../../docker/docker-compose.yaml). Antes de ejecutar el comando de instalación, asegúrate de que [Docker](https://docs.docker.com/get-docker/) y [Docker Compose](https://docs.docker.com/compose/install/) estén instalados en tu máquina:

```bash
cd docker
@@ -122,7 +124,7 @@ Después de ejecutarlo, puedes acceder al panel de control de Dify en tu navegad

## Próximos pasos

Si necesita personalizar la configuración, consulte los comentarios en nuestro archivo [.env.example](docker/.env.example) y actualice los valores correspondientes en su archivo `.env`. Además, es posible que deba realizar ajustes en el propio archivo `docker-compose.yaml`, como cambiar las versiones de las imágenes, las asignaciones de puertos o los montajes de volúmenes, según su entorno de implementación y requisitos específicos. Después de realizar cualquier cambio, vuelva a ejecutar `docker-compose up -d`. Puede encontrar la lista completa de variables de entorno disponibles [aquí](https://docs.dify.ai/getting-started/install-self-hosted/environments).
Si necesita personalizar la configuración, consulte los comentarios en nuestro archivo [.env.example](../../docker/.env.example) y actualice los valores correspondientes en su archivo `.env`. Además, es posible que deba realizar ajustes en el propio archivo `docker-compose.yaml`, como cambiar las versiones de las imágenes, las asignaciones de puertos o los montajes de volúmenes, según su entorno de implementación y requisitos específicos. Después de realizar cualquier cambio, vuelva a ejecutar `docker-compose up -d`. Puede encontrar la lista completa de variables de entorno disponibles [aquí](https://docs.dify.ai/getting-started/install-self-hosted/environments).
@@ -170,7 +172,7 @@ Implementa Dify en AKS con un clic usando [Azure Devops Pipeline Helm Chart by @

## Contribuir

Para aquellos que deseen contribuir con código, consulten nuestra [Guía de contribución](https://github.com/langgenius/dify/blob/main/CONTRIBUTING/CONTRIBUTING_ES.md).
Para aquellos que deseen contribuir con código, consulten nuestra [Guía de contribución](./CONTRIBUTING.md).
Al mismo tiempo, considera apoyar a Dify compartiéndolo en redes sociales y en eventos y conferencias.

> Estamos buscando colaboradores para ayudar con la traducción de Dify a idiomas que no sean el mandarín o el inglés. Si estás interesado en ayudar, consulta el [README de i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) para obtener más información y déjanos un comentario en el canal `global-users` de nuestro [Servidor de Comunidad en Discord](https://discord.gg/8Tpq4AcN9c).
@@ -184,7 +186,7 @@ Al mismo tiempo, considera apoyar a Dify compartiéndolo en redes sociales y en
## Comunidad y Contacto

- [Discusión en GitHub](https://github.com/langgenius/dify/discussions). Lo mejor para: compartir comentarios y hacer preguntas.
- [Reporte de problemas en GitHub](https://github.com/langgenius/dify/issues). Lo mejor para: errores que encuentres usando Dify.AI y propuestas de características. Consulta nuestra [Guía de contribución](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
- [Reporte de problemas en GitHub](https://github.com/langgenius/dify/issues). Lo mejor para: errores que encuentres usando Dify.AI y propuestas de características. Consulta nuestra [Guía de contribución](./CONTRIBUTING.md).
- [Discord](https://discord.gg/FngNHpbcY7). Lo mejor para: compartir tus aplicaciones y pasar el rato con la comunidad.
- [X(Twitter)](https://twitter.com/dify_ai). Lo mejor para: compartir tus aplicaciones y pasar el rato con la comunidad.

@@ -198,12 +200,4 @@ Para proteger tu privacidad, evita publicar problemas de seguridad en GitHub. En

## Licencia

Este repositorio está disponible bajo la [Licencia de Código Abierto de Dify](../LICENSE), que es esencialmente Apache 2.0 con algunas restricciones adicionales.

## Divulgación de Seguridad

Para proteger tu privacidad, evita publicar problemas de seguridad en GitHub. En su lugar, envía tus preguntas a security@dify.ai y te proporcionaremos una respuesta más detallada.

## Licencia

Este repositorio está disponible bajo la [Licencia de Código Abierto de Dify](../LICENSE), que es esencialmente Apache 2.0 con algunas restricciones adicionales.
Este repositorio está disponible bajo la [Licencia de Código Abierto de Dify](../../LICENSE), que es esencialmente Apache 2.0 con algunas restricciones adicionales.
@@ -6,7 +6,7 @@ Nous devons être agiles et livrer rapidement compte tenu de notre position, mai

Ce guide, comme Dify lui-même, est un travail en constante évolution. Nous apprécions grandement votre compréhension si parfois il est en retard par rapport au projet réel, et nous accueillons tout commentaire pour nous aider à nous améliorer.

En termes de licence, veuillez prendre une minute pour lire notre bref [Accord de Licence et de Contributeur](../LICENSE). La communauté adhère également au [code de conduite](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).
En termes de licence, veuillez prendre une minute pour lire notre bref [Accord de Licence et de Contributeur](../../LICENSE). La communauté adhère également au [code de conduite](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).

## Avant de vous lancer

@@ -1,4 +1,4 @@
![cover-v5-optimized](./images/GitHub_README_if.png)
![cover-v5-optimized](../images/GitHub_README_if.png)

<p align="center">
<a href="https://cloud.dify.ai">Dify Cloud</a> ·
@@ -35,17 +35,19 @@
</p>

<p align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

#
@@ -108,7 +110,7 @@ Mettez une étoile à Dify sur GitHub et soyez instantanément informé des nouv

</br>

La manière la plus simple de démarrer le serveur Dify est d'exécuter notre fichier [docker-compose.yml](docker/docker-compose.yaml). Avant d'exécuter la commande d'installation, assurez-vous que [Docker](https://docs.docker.com/get-docker/) et [Docker Compose](https://docs.docker.com/compose/install/) sont installés sur votre machine:
La manière la plus simple de démarrer le serveur Dify est d'exécuter notre fichier [docker-compose.yml](../../docker/docker-compose.yaml). Avant d'exécuter la commande d'installation, assurez-vous que [Docker](https://docs.docker.com/get-docker/) et [Docker Compose](https://docs.docker.com/compose/install/) sont installés sur votre machine:

```bash
cd docker
@@ -122,7 +124,7 @@ Après l'exécution, vous pouvez accéder au tableau de bord Dify dans votre nav

## Prochaines étapes

Si vous devez personnaliser la configuration, veuillez vous référer aux commentaires dans notre fichier [.env.example](docker/.env.example) et mettre à jour les valeurs correspondantes dans votre fichier `.env`. De plus, vous devrez peut-être apporter des modifications au fichier `docker-compose.yaml` lui-même, comme changer les versions d'image, les mappages de ports ou les montages de volumes, en fonction de votre environnement de déploiement et de vos exigences spécifiques. Après avoir effectué des modifications, veuillez réexécuter `docker-compose up -d`. Vous pouvez trouver la liste complète des variables d'environnement disponibles [ici](https://docs.dify.ai/getting-started/install-self-hosted/environments).
Si vous devez personnaliser la configuration, veuillez vous référer aux commentaires dans notre fichier [.env.example](../../docker/.env.example) et mettre à jour les valeurs correspondantes dans votre fichier `.env`. De plus, vous devrez peut-être apporter des modifications au fichier `docker-compose.yaml` lui-même, comme changer les versions d'image, les mappages de ports ou les montages de volumes, en fonction de votre environnement de déploiement et de vos exigences spécifiques. Après avoir effectué des modifications, veuillez réexécuter `docker-compose up -d`. Vous pouvez trouver la liste complète des variables d'environnement disponibles [ici](https://docs.dify.ai/getting-started/install-self-hosted/environments).

Si vous souhaitez configurer une configuration haute disponibilité, la communauté fournit des [Helm Charts](https://helm.sh/) et des fichiers YAML, à travers lesquels vous pouvez déployer Dify sur Kubernetes.

@@ -168,7 +170,7 @@ Déployez Dify sur AKS en un clic en utilisant [Azure Devops Pipeline Helm Chart

## Contribuer

Pour ceux qui souhaitent contribuer du code, consultez notre [Guide de contribution](https://github.com/langgenius/dify/blob/main/CONTRIBUTING/CONTRIBUTING_FR.md).
Pour ceux qui souhaitent contribuer du code, consultez notre [Guide de contribution](./CONTRIBUTING.md).
Dans le même temps, veuillez envisager de soutenir Dify en le partageant sur les réseaux sociaux et lors d'événements et de conférences.

> Nous recherchons des contributeurs pour aider à traduire Dify dans des langues autres que le mandarin ou l'anglais. Si vous êtes intéressé à aider, veuillez consulter le [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) pour plus d'informations, et laissez-nous un commentaire dans le canal `global-users` de notre [Serveur communautaire Discord](https://discord.gg/8Tpq4AcN9c).
@@ -182,7 +184,7 @@ Dans le même temps, veuillez envisager de soutenir Dify en le partageant sur le
## Communauté & Contact

- [Discussion GitHub](https://github.com/langgenius/dify/discussions). Meilleur pour: partager des commentaires et poser des questions.
- [Problèmes GitHub](https://github.com/langgenius/dify/issues). Meilleur pour: les bogues que vous rencontrez en utilisant Dify.AI et les propositions de fonctionnalités. Consultez notre [Guide de contribution](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
- [Problèmes GitHub](https://github.com/langgenius/dify/issues). Meilleur pour: les bogues que vous rencontrez en utilisant Dify.AI et les propositions de fonctionnalités. Consultez notre [Guide de contribution](./CONTRIBUTING.md).
- [Discord](https://discord.gg/FngNHpbcY7). Meilleur pour: partager vos applications et passer du temps avec la communauté.
- [X(Twitter)](https://twitter.com/dify_ai). Meilleur pour: partager vos applications et passer du temps avec la communauté.

@@ -196,12 +198,4 @@ Pour protéger votre vie privée, veuillez éviter de publier des problèmes de

## Licence

Ce référentiel est disponible sous la [Licence open source Dify](../LICENSE), qui est essentiellement l'Apache 2.0 avec quelques restrictions supplémentaires.

## Divulgation de sécurité

Pour protéger votre vie privée, veuillez éviter de publier des problèmes de sécurité sur GitHub. Au lieu de cela, envoyez vos questions à security@dify.ai et nous vous fournirons une réponse plus détaillée.

## Licence

Ce référentiel est disponible sous la [Licence open source Dify](../LICENSE), qui est essentiellement l'Apache 2.0 avec quelques restrictions supplémentaires.
Ce référentiel est disponible sous la [Licence open source Dify](../../LICENSE), qui est essentiellement l'Apache 2.0 avec quelques restrictions supplémentaires.
@@ -6,7 +6,7 @@ Difyに貢献しようとお考えですか?素晴らしいですね。私た

このガイドは、Dify自体と同様に、常に進化し続けています。実際のプロジェクトの進行状況と多少のずれが生じる場合もございますが、ご理解いただけますと幸いです。改善のためのフィードバックも歓迎いたします。

ライセンスについては、[ライセンスと貢献者同意書](../LICENSE)をご一読ください。また、コミュニティは[行動規範](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)に従っています。
ライセンスについては、[ライセンスと貢献者同意書](../../LICENSE)をご一読ください。また、コミュニティは[行動規範](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)に従っています。

## 始める前に

@@ -1,4 +1,4 @@
![cover-v5-optimized](./images/GitHub_README_if.png)
![cover-v5-optimized](../images/GitHub_README_if.png)

<p align="center">
<a href="https://cloud.dify.ai">Dify Cloud</a> ·
@@ -35,17 +35,19 @@
</p>

<p align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

#
@@ -109,7 +111,7 @@ GitHub上でDifyにスターを付けることで、Difyに関する新しいニ

</br>

Difyサーバーを起動する最も簡単な方法は、[docker-compose.yml](docker/docker-compose.yaml)ファイルを実行することです。インストールコマンドを実行する前に、マシンに[Docker](https://docs.docker.com/get-docker/)と[Docker Compose](https://docs.docker.com/compose/install/)がインストールされていることを確認してください。
Difyサーバーを起動する最も簡単な方法は、[docker-compose.yml](../../docker/docker-compose.yaml)ファイルを実行することです。インストールコマンドを実行する前に、マシンに[Docker](https://docs.docker.com/get-docker/)と[Docker Compose](https://docs.docker.com/compose/install/)がインストールされていることを確認してください。

```bash
cd docker
@@ -123,7 +125,7 @@ docker compose up -d

## 次のステップ

設定をカスタマイズする必要がある場合は、[.env.example](docker/.env.example) ファイルのコメントを参照し、`.env` ファイルの対応する値を更新してください。さらに、デプロイ環境や要件に応じて、`docker-compose.yaml` ファイル自体を調整する必要がある場合があります。たとえば、イメージのバージョン、ポートのマッピング、ボリュームのマウントなどを変更します。変更を加えた後は、`docker-compose up -d` を再実行してください。利用可能な環境変数の全一覧は、[こちら](https://docs.dify.ai/getting-started/install-self-hosted/environments)で確認できます。
設定をカスタマイズする必要がある場合は、[.env.example](../../docker/.env.example) ファイルのコメントを参照し、`.env` ファイルの対応する値を更新してください。さらに、デプロイ環境や要件に応じて、`docker-compose.yaml` ファイル自体を調整する必要がある場合があります。たとえば、イメージのバージョン、ポートのマッピング、ボリュームのマウントなどを変更します。変更を加えた後は、`docker-compose up -d` を再実行してください。利用可能な環境変数の全一覧は、[こちら](https://docs.dify.ai/getting-started/install-self-hosted/environments)で確認できます。

高可用性設定を設定する必要がある場合、コミュニティは[Helm Charts](https://helm.sh/)とYAMLファイルにより、DifyをKubernetesにデプロイすることができます。

@@ -169,7 +171,7 @@ docker compose up -d

## 貢献

コードに貢献したい方は、[Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING/CONTRIBUTING_JA.md)を参照してください。
コードに貢献したい方は、[Contribution Guide](./CONTRIBUTING.md)を参照してください。
同時に、DifyをSNSやイベント、カンファレンスで共有してサポートしていただけると幸いです。

> Difyを英語または中国語以外の言語に翻訳してくれる貢献者を募集しています。興味がある場合は、詳細については[i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md)を参照してください。また、[Discordコミュニティサーバー](https://discord.gg/8Tpq4AcN9c)の`global-users`チャンネルにコメントを残してください。
@@ -183,10 +185,10 @@ docker compose up -d
## コミュニティ & お問い合わせ

- [GitHub Discussion](https://github.com/langgenius/dify/discussions). 主に: フィードバックの共有や質問。
- [GitHub Issues](https://github.com/langgenius/dify/issues). 主に: Dify.AIを使用する際に発生するエラーや問題については、[貢献ガイド](../CONTRIBUTING/CONTRIBUTING_JA.md)を参照してください
- [GitHub Issues](https://github.com/langgenius/dify/issues). 主に: Dify.AIを使用する際に発生するエラーや問題については、[貢献ガイド](./CONTRIBUTING.md)を参照してください
- [Discord](https://discord.gg/FngNHpbcY7). 主に: アプリケーションの共有やコミュニティとの交流。
- [X(Twitter)](https://twitter.com/dify_ai). 主に: アプリケーションの共有やコミュニティとの交流。

## ライセンス

このリポジトリは、Dify Open Source License にいくつかの追加制限を加えた[Difyオープンソースライセンス](../LICENSE)の下で利用可能です。
このリポジトリは、Dify Open Source License にいくつかの追加制限を加えた[Difyオープンソースライセンス](../../LICENSE)の下で利用可能です。
@@ -6,7 +6,7 @@ Dify에 기여하려고 하시는군요 - 정말 멋집니다, 당신이 무엇

이 가이드는 Dify 자체와 마찬가지로 끊임없이 진행 중인 작업입니다. 때로는 실제 프로젝트보다 뒤처질 수 있다는 점을 이해해 주시면 감사하겠으며, 개선을 위한 피드백은 언제든지 환영합니다.

라이센스 측면에서, 간략한 [라이센스 및 기여자 동의서](../LICENSE)를 읽어보는 시간을 가져주세요. 커뮤니티는 또한 [행동 강령](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)을 준수합니다.
라이센스 측면에서, 간략한 [라이센스 및 기여자 동의서](../../LICENSE)를 읽어보는 시간을 가져주세요. 커뮤니티는 또한 [행동 강령](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)을 준수합니다.

## 시작하기 전에

@@ -1,4 +1,4 @@
![cover-v5-optimized](./images/GitHub_README_if.png)
![cover-v5-optimized](../images/GitHub_README_if.png)

<p align="center">
<a href="https://cloud.dify.ai">Dify 클라우드</a> ·
@@ -35,17 +35,19 @@
</p>

<p align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

Dify는 오픈 소스 LLM 앱 개발 플랫폼입니다. 직관적인 인터페이스를 통해 AI 워크플로우, RAG 파이프라인, 에이전트 기능, 모델 관리, 관찰 기능 등을 결합하여 프로토타입에서 프로덕션까지 빠르게 전환할 수 있습니다. 주요 기능 목록은 다음과 같습니다:</br> </br>
@@ -102,7 +104,7 @@ GitHub에서 Dify에 별표를 찍어 새로운 릴리스를 즉시 알림 받

</br>

Dify 서버를 시작하는 가장 쉬운 방법은 [docker-compose.yml](docker/docker-compose.yaml) 파일을 실행하는 것입니다. 설치 명령을 실행하기 전에 [Docker](https://docs.docker.com/get-docker/) 및 [Docker Compose](https://docs.docker.com/compose/install/)가 머신에 설치되어 있는지 확인하세요.
Dify 서버를 시작하는 가장 쉬운 방법은 [docker-compose.yml](../../docker/docker-compose.yaml) 파일을 실행하는 것입니다. 설치 명령을 실행하기 전에 [Docker](https://docs.docker.com/get-docker/) 및 [Docker Compose](https://docs.docker.com/compose/install/)가 머신에 설치되어 있는지 확인하세요.

```bash
cd docker
@@ -116,7 +118,7 @@ docker compose up -d

## 다음 단계

구성을 사용자 정의해야 하는 경우 [.env.example](docker/.env.example) 파일의 주석을 참조하고 `.env` 파일에서 해당 값을 업데이트하십시오. 또한 특정 배포 환경 및 요구 사항에 따라 `docker-compose.yaml` 파일 자체를 조정해야 할 수도 있습니다. 예를 들어 이미지 버전, 포트 매핑 또는 볼륨 마운트를 변경합니다. 변경 한 후 `docker-compose up -d`를 다시 실행하십시오. 사용 가능한 환경 변수의 전체 목록은 [여기](https://docs.dify.ai/getting-started/install-self-hosted/environments)에서 찾을 수 있습니다.
구성을 사용자 정의해야 하는 경우 [.env.example](../../docker/.env.example) 파일의 주석을 참조하고 `.env` 파일에서 해당 값을 업데이트하십시오. 또한 특정 배포 환경 및 요구 사항에 따라 `docker-compose.yaml` 파일 자체를 조정해야 할 수도 있습니다. 예를 들어 이미지 버전, 포트 매핑 또는 볼륨 마운트를 변경합니다. 변경 한 후 `docker-compose up -d`를 다시 실행하십시오. 사용 가능한 환경 변수의 전체 목록은 [여기](https://docs.dify.ai/getting-started/install-self-hosted/environments)에서 찾을 수 있습니다.

고가용성 설정이 필요한 경우, 커뮤니티가 제공하는 [Helm Charts](https://helm.sh/)와 YAML 파일을 통해 Dify를 Kubernetes에 배포할 수 있습니다.

@@ -162,7 +164,7 @@ Dify를 Kubernetes에 배포하고 프리미엄 스케일링 설정을 구성했

## 기여

코드에 기여하고 싶은 분들은 [기여 가이드](https://github.com/langgenius/dify/blob/main/CONTRIBUTING/CONTRIBUTING_KR.md)를 참조하세요.
코드에 기여하고 싶은 분들은 [기여 가이드](./CONTRIBUTING.md)를 참조하세요.
동시에 Dify를 소셜 미디어와 행사 및 컨퍼런스에 공유하여 지원하는 것을 고려해 주시기 바랍니다.

> 우리는 Dify를 중국어나 영어 이외의 언어로 번역하는 데 도움을 줄 수 있는 기여자를 찾고 있습니다. 도움을 주고 싶으시다면 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md)에서 더 많은 정보를 확인하시고 [Discord 커뮤니티 서버](https://discord.gg/8Tpq4AcN9c)의 `global-users` 채널에 댓글을 남겨주세요.
@@ -176,7 +178,7 @@ Dify를 Kubernetes에 배포하고 프리미엄 스케일링 설정을 구성했
## 커뮤니티 & 연락처

- [GitHub 토론](https://github.com/langgenius/dify/discussions). 피드백 공유 및 질문하기에 적합합니다.
- [GitHub 이슈](https://github.com/langgenius/dify/issues). Dify.AI 사용 중 발견한 버그와 기능 제안에 적합합니다. [기여 가이드](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)를 참조하세요.
- [GitHub 이슈](https://github.com/langgenius/dify/issues). Dify.AI 사용 중 발견한 버그와 기능 제안에 적합합니다. [기여 가이드](./CONTRIBUTING.md)를 참조하세요.
- [디스코드](https://discord.gg/FngNHpbcY7). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다.
- [트위터](https://twitter.com/dify_ai). 애플리케이션 공유 및 커뮤니티와 소통하기에 적합합니다.

@@ -190,4 +192,4 @@ Dify를 Kubernetes에 배포하고 프리미엄 스케일링 설정을 구성했

## 라이선스

이 저장소는 기본적으로 몇 가지 추가 제한 사항이 있는 Apache 2.0인 [Dify 오픈 소스 라이선스](../LICENSE)에 따라 사용할 수 있습니다.
이 저장소는 기본적으로 몇 가지 추가 제한 사항이 있는 Apache 2.0인 [Dify 오픈 소스 라이선스](../../LICENSE)에 따라 사용할 수 있습니다.
@@ -6,7 +6,7 @@ Precisamos ser ágeis e entregar rapidamente considerando onde estamos, mas tamb
|
||||
|
||||
Este guia, como o próprio Dify, é um trabalho em constante evolução. Agradecemos muito a sua compreensão se às vezes ele ficar atrasado em relação ao projeto real, e damos as boas-vindas a qualquer feedback para que possamos melhorar.
|
||||
|
||||
Em termos de licenciamento, por favor, dedique um minuto para ler nosso breve [Acordo de Licença e Contribuidor](../LICENSE). A comunidade também adere ao [código de conduta](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).
|
||||
Em termos de licenciamento, por favor, dedique um minuto para ler nosso breve [Acordo de Licença e Contribuidor](../../LICENSE). A comunidade também adere ao [código de conduta](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).
|
||||
|
||||
## Antes de começar
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||

|
||||

|
||||
|
||||
<p align="center">
|
||||
📌 <a href="https://dify.ai/blog/introducing-dify-workflow-file-upload-a-demo-on-ai-podcast">Introduzindo o Dify Workflow com Upload de Arquivo: Recrie o Podcast Google NotebookLM</a>
|
||||
@@ -39,18 +39,20 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="../README.md"><img alt="README em Inglês" src="https://img.shields.io/badge/English-d9d9d9"></a>
|
||||
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
|
||||
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
|
||||
<a href="./README_ES.md"><img alt="README em Espanhol" src="https://img.shields.io/badge/Español-d9d9d9"></a>
|
||||
<a href="./README_FR.md"><img alt="README em Francês" src="https://img.shields.io/badge/Français-d9d9d9"></a>
|
||||
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
|
||||
<a href="./README_KR.md"><img alt="README em Coreano" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
|
||||
<a href="./README_AR.md"><img alt="README em Árabe" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
|
||||
<a href="./README_TR.md"><img alt="README em Turco" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
|
||||
<a href="./README_VI.md"><img alt="README em Vietnamita" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
|
||||
<a href="./README_PT.md"><img alt="README em Português - BR" src="https://img.shields.io/badge/Portugu%C3%AAs-BR?style=flat&label=BR&color=d9d9d9"></a>
|
||||
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
|
||||
<a href="../../README.md"><img alt="README em Inglês" src="https://img.shields.io/badge/English-d9d9d9"></a>
|
||||
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
|
||||
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
|
||||
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
|
||||
<a href="../es-ES/README.md"><img alt="README em Espanhol" src="https://img.shields.io/badge/Español-d9d9d9"></a>
|
||||
<a href="../fr-FR/README.md"><img alt="README em Francês" src="https://img.shields.io/badge/Français-d9d9d9"></a>
|
||||
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
|
||||
<a href="../ko-KR/README.md"><img alt="README em Coreano" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
|
||||
<a href="../ar-SA/README.md"><img alt="README em Árabe" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
|
||||
<a href="../tr-TR/README.md"><img alt="README em Turco" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
|
||||
<a href="../vi-VN/README.md"><img alt="README em Vietnamita" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
|
||||
<a href="../pt-BR/README.md"><img alt="README em Português - BR" src="https://img.shields.io/badge/Portugu%C3%AAs-BR?style=flat&label=BR&color=d9d9d9"></a>
|
||||
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
|
||||
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
|
||||
</p>
|
||||
|
||||
Dify é uma plataforma de desenvolvimento de aplicativos LLM de código aberto. Sua interface intuitiva combina workflow de IA, pipeline RAG, capacidades de agente, gerenciamento de modelos, recursos de observabilidade e muito mais, permitindo que você vá rapidamente do protótipo à produção. Aqui está uma lista das principais funcionalidades:
|
||||
@@ -108,7 +110,7 @@ Dê uma estrela no Dify no GitHub e seja notificado imediatamente sobre novos la
|
||||
|
||||
</br>
|
||||
|
||||
A maneira mais fácil de iniciar o servidor Dify é executar nosso arquivo [docker-compose.yml](docker/docker-compose.yaml). Antes de rodar o comando de instalação, certifique-se de que o [Docker](https://docs.docker.com/get-docker/) e o [Docker Compose](https://docs.docker.com/compose/install/) estão instalados na sua máquina:
A maneira mais fácil de iniciar o servidor Dify é executar nosso arquivo [docker-compose.yml](../../docker/docker-compose.yaml). Antes de rodar o comando de instalação, certifique-se de que o [Docker](https://docs.docker.com/get-docker/) e o [Docker Compose](https://docs.docker.com/compose/install/) estão instalados na sua máquina:

```bash
cd docker
@@ -122,7 +124,7 @@ Após a execução, você pode acessar o painel do Dify no navegador em [http://

## Próximos passos

Se precisar personalizar a configuração, consulte os comentários no nosso arquivo [.env.example](docker/.env.example) e atualize os valores correspondentes no seu arquivo `.env`. Além disso, talvez seja necessário fazer ajustes no próprio arquivo `docker-compose.yaml`, como alterar versões de imagem, mapeamentos de portas ou montagens de volumes, com base no seu ambiente de implantação específico e nas suas necessidades. Após fazer quaisquer alterações, execute novamente `docker-compose up -d`. Você pode encontrar a lista completa de variáveis de ambiente disponíveis [aqui](https://docs.dify.ai/getting-started/install-self-hosted/environments).
Se precisar personalizar a configuração, consulte os comentários no nosso arquivo [.env.example](../../docker/.env.example) e atualize os valores correspondentes no seu arquivo `.env`. Além disso, talvez seja necessário fazer ajustes no próprio arquivo `docker-compose.yaml`, como alterar versões de imagem, mapeamentos de portas ou montagens de volumes, com base no seu ambiente de implantação específico e nas suas necessidades. Após fazer quaisquer alterações, execute novamente `docker-compose up -d`. Você pode encontrar a lista completa de variáveis de ambiente disponíveis [aqui](https://docs.dify.ai/getting-started/install-self-hosted/environments).

Se deseja configurar uma instalação de alta disponibilidade, há [Helm Charts](https://helm.sh/) e arquivos YAML contribuídos pela comunidade que permitem a implantação do Dify no Kubernetes.

@@ -168,7 +170,7 @@ Implante o Dify no AKS com um clique usando [Azure Devops Pipeline Helm Chart by

## Contribuindo

Para aqueles que desejam contribuir com código, veja nosso [Guia de Contribuição](https://github.com/langgenius/dify/blob/main/CONTRIBUTING/CONTRIBUTING_PT.md).
Para aqueles que desejam contribuir com código, veja nosso [Guia de Contribuição](./CONTRIBUTING.md).
Ao mesmo tempo, considere apoiar o Dify compartilhando-o nas redes sociais e em eventos e conferências.

> Estamos buscando contribuidores para ajudar na tradução do Dify para idiomas além de Mandarim e Inglês. Se você tiver interesse em ajudar, consulte o [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) para mais informações e deixe-nos um comentário no canal `global-users` em nosso [Servidor da Comunidade no Discord](https://discord.gg/8Tpq4AcN9c).
@@ -182,7 +184,7 @@ Ao mesmo tempo, considere apoiar o Dify compartilhando-o nas redes sociais e em
## Comunidade e contato

- [Discussões no GitHub](https://github.com/langgenius/dify/discussions). Melhor para: compartilhar feedback e fazer perguntas.
- [Problemas no GitHub](https://github.com/langgenius/dify/issues). Melhor para: relatar bugs encontrados no Dify.AI e propor novos recursos. Veja nosso [Guia de Contribuição](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
- [Problemas no GitHub](https://github.com/langgenius/dify/issues). Melhor para: relatar bugs encontrados no Dify.AI e propor novos recursos. Veja nosso [Guia de Contribuição](./CONTRIBUTING.md).
- [Discord](https://discord.gg/FngNHpbcY7). Melhor para: compartilhar suas aplicações e interagir com a comunidade.
- [X(Twitter)](https://twitter.com/dify_ai). Melhor para: compartilhar suas aplicações e interagir com a comunidade.

@@ -196,4 +198,4 @@ Para proteger sua privacidade, evite postar problemas de segurança no GitHub. E

## Licença

Este repositório está disponível sob a [Licença de Código Aberto Dify](../LICENSE), que é essencialmente Apache 2.0 com algumas restrições adicionais.
Este repositório está disponível sob a [Licença de Código Aberto Dify](../../LICENSE), que é essencialmente Apache 2.0 com algumas restrições adicionais.
@@ -1,4 +1,4 @@



<p align="center">
📌 <a href="https://dify.ai/blog/introducing-dify-workflow-file-upload-a-demo-on-ai-podcast">Predstavljamo nalaganje datotek Dify Workflow: znova ustvarite Google NotebookLM Podcast</a>
@@ -36,18 +36,20 @@
</p>

<p align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_SI.md"><img alt="README Slovenščina" src="https://img.shields.io/badge/Sloven%C5%A1%C4%8Dina-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../sl-SI/README.md"><img alt="README Slovenščina" src="https://img.shields.io/badge/Sloven%C5%A1%C4%8Dina-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

Dify je odprtokodna platforma za razvoj aplikacij LLM. Njegov intuitivni vmesnik združuje agentski potek dela z umetno inteligenco, cevovod RAG, zmogljivosti agentov, upravljanje modelov, funkcije opazovanja in več, kar vam omogoča hiter prehod od prototipa do proizvodnje.
@@ -169,7 +171,7 @@ Z enim klikom namestite Dify v AKS z uporabo [Azure Devops Pipeline Helm Chart b

## Prispevam

Za tiste, ki bi radi prispevali kodo, si oglejte naš vodnik za prispevke . Hkrati vas prosimo, da podprete Dify tako, da ga delite na družbenih medijih ter na dogodkih in konferencah.
Za tiste, ki bi radi prispevali kodo, si oglejte naš [vodnik za prispevke](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md). Hkrati vas prosimo, da podprete Dify tako, da ga delite na družbenih medijih ter na dogodkih in konferencah.

> Iščemo sodelavce za pomoč pri prevajanju Difyja v jezike, ki niso mandarinščina ali angleščina. Če želite pomagati, si oglejte i18n README za več informacij in nam pustite komentar v global-userskanalu našega strežnika skupnosti Discord .

@@ -196,4 +198,4 @@ Zaradi zaščite vaše zasebnosti se izogibajte objavljanju varnostnih vprašanj

## Licenca

To skladišče je na voljo pod [odprtokodno licenco Dify](../LICENSE) , ki je v bistvu Apache 2.0 z nekaj dodatnimi omejitvami.
To skladišče je na voljo pod [odprtokodno licenco Dify](../../LICENSE) , ki je v bistvu Apache 2.0 z nekaj dodatnimi omejitvami.
@@ -1,4 +1,4 @@



<p align="center">
<a href="https://cloud.dify.ai">Dify Cloud</a> ·
@@ -35,17 +35,19 @@
</p>

<p align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

#
@@ -108,7 +110,7 @@ Star Dify on GitHub and be instantly notified of new releases.

</br>

The easiest way to start the Dify server is to run our [docker-compose.yml](docker/docker-compose.yaml) file. Before running the installation command, make sure that [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:
The easiest way to start the Dify server is to run our [docker-compose.yml](../../docker/docker-compose.yaml) file. Before running the installation command, make sure that [Docker](https://docs.docker.com/get-docker/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed on your machine:

```bash
cd docker
@@ -122,7 +124,7 @@ After running, you can access the Dify dashboard in your browser at [http://loca

## Next steps

If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker-compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).
If you need to customize the configuration, please refer to the comments in our [.env.example](../../docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker-compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments).

If you'd like to configure a highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes.

@@ -181,10 +183,7 @@ At the same time, please consider supporting Dify by sharing it on social media

## Community & Contact

- \[GitHub Discussion\](https://github.com/langgenius/dify/discussions

). Best for: sharing feedback and asking questions.

- [GitHub Discussion](https://github.com/langgenius/dify/discussions). Best for: sharing feedback and asking questions.
- [GitHub Issues](https://github.com/langgenius/dify/issues). Best for: bugs you encounter using Dify.AI, and feature proposals. See our [Contribution Guide](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md).
- [Discord](https://discord.gg/FngNHpbcY7). Best for: sharing your applications and hanging out with the community.
- [X(Twitter)](https://twitter.com/dify_ai). Best for: sharing your applications and hanging out with the community.
@@ -199,4 +198,4 @@ To protect your privacy, please avoid posting security issues on GitHub. Instead

## License

This repository is available under the [Dify Open Source License](../LICENSE), which is essentially Apache 2.0 with a few additional restrictions.
This repository is available under the [Dify Open Source License](../../LICENSE), which is essentially Apache 2.0 with a few additional restrictions.
@@ -6,7 +6,7 @@ Bulunduğumuz noktada çevik olmamız ve hızlı hareket etmemiz gerekiyor, anca

Bu rehber, Dify'ın kendisi gibi, sürekli gelişen bir çalışmadır. Bazen gerçek projenin gerisinde kalırsa anlayışınız için çok minnettarız ve gelişmemize yardımcı olacak her türlü geri bildirimi memnuniyetle karşılıyoruz.

Lisanslama konusunda, lütfen kısa [Lisans ve Katkıda Bulunan Anlaşmamızı](../LICENSE) okumak için bir dakikanızı ayırın. Topluluk ayrıca [davranış kurallarına](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md) da uyar.
Lisanslama konusunda, lütfen kısa [Lisans ve Katkıda Bulunan Anlaşmamızı](../../LICENSE) okumak için bir dakikanızı ayırın. Topluluk ayrıca [davranış kurallarına](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md) da uyar.

## Başlamadan Önce

@@ -1,4 +1,4 @@



<p align="center">
<a href="https://cloud.dify.ai">Dify Bulut</a> ·
@@ -35,17 +35,19 @@
</p>

<p align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

Dify, açık kaynaklı bir LLM uygulama geliştirme platformudur. Sezgisel arayüzü, AI iş akışı, RAG pipeline'ı, ajan yetenekleri, model yönetimi, gözlemlenebilirlik özellikleri ve daha fazlasını birleştirerek, prototipten üretime hızlıca geçmenizi sağlar. İşte temel özelliklerin bir listesi:
@@ -102,7 +104,7 @@ GitHub'da Dify'a yıldız verin ve yeni sürümlerden anında haberdar olun.
> - RAM >= 4GB

</br>
Dify sunucusunu başlatmanın en kolay yolu, [docker-compose.yml](docker/docker-compose.yaml) dosyamızı çalıştırmaktır. Kurulum komutunu çalıştırmadan önce, makinenizde [Docker](https://docs.docker.com/get-docker/) ve [Docker Compose](https://docs.docker.com/compose/install/)'un kurulu olduğundan emin olun:
Dify sunucusunu başlatmanın en kolay yolu, [docker-compose.yml](../../docker/docker-compose.yaml) dosyamızı çalıştırmaktır. Kurulum komutunu çalıştırmadan önce, makinenizde [Docker](https://docs.docker.com/get-docker/) ve [Docker Compose](https://docs.docker.com/compose/install/)'un kurulu olduğundan emin olun:

```bash
cd docker
@@ -116,7 +118,7 @@ docker compose up -d

## Sonraki adımlar

Yapılandırmayı özelleştirmeniz gerekiyorsa, lütfen [.env.example](docker/.env.example) dosyamızdaki yorumlara bakın ve `.env` dosyanızdaki ilgili değerleri güncelleyin. Ayrıca, spesifik dağıtım ortamınıza ve gereksinimlerinize bağlı olarak `docker-compose.yaml` dosyasının kendisinde de, imaj sürümlerini, port eşlemelerini veya hacim bağlantılarını değiştirmek gibi ayarlamalar yapmanız gerekebilir. Herhangi bir değişiklik yaptıktan sonra, lütfen `docker-compose up -d` komutunu tekrar çalıştırın. Kullanılabilir tüm ortam değişkenlerinin tam listesini [burada](https://docs.dify.ai/getting-started/install-self-hosted/environments) bulabilirsiniz.
Yapılandırmayı özelleştirmeniz gerekiyorsa, lütfen [.env.example](../../docker/.env.example) dosyamızdaki yorumlara bakın ve `.env` dosyanızdaki ilgili değerleri güncelleyin. Ayrıca, spesifik dağıtım ortamınıza ve gereksinimlerinize bağlı olarak `docker-compose.yaml` dosyasının kendisinde de, imaj sürümlerini, port eşlemelerini veya hacim bağlantılarını değiştirmek gibi ayarlamalar yapmanız gerekebilir. Herhangi bir değişiklik yaptıktan sonra, lütfen `docker-compose up -d` komutunu tekrar çalıştırın. Kullanılabilir tüm ortam değişkenlerinin tam listesini [burada](https://docs.dify.ai/getting-started/install-self-hosted/environments) bulabilirsiniz.

Yüksek kullanılabilirliğe sahip bir kurulum yapılandırmak isterseniz, Dify'ın Kubernetes üzerine dağıtılmasına olanak tanıyan topluluk katkılı [Helm Charts](https://helm.sh/) ve YAML dosyaları mevcuttur.

@@ -161,7 +163,7 @@ Dify'ı bulut platformuna tek tıklamayla dağıtın [terraform](https://www.ter

## Katkıda Bulunma

Kod katkısında bulunmak isteyenler için [Katkı Kılavuzumuza](https://github.com/langgenius/dify/blob/main/CONTRIBUTING/CONTRIBUTING_TR.md) bakabilirsiniz.
Kod katkısında bulunmak isteyenler için [Katkı Kılavuzumuza](./CONTRIBUTING.md) bakabilirsiniz.
Aynı zamanda, lütfen Dify'ı sosyal medyada, etkinliklerde ve konferanslarda paylaşarak desteklemeyi düşünün.

> Dify'ı Mandarin veya İngilizce dışındaki dillere çevirmemize yardımcı olacak katkıda bulunanlara ihtiyacımız var. Yardımcı olmakla ilgileniyorsanız, lütfen daha fazla bilgi için [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) dosyasına bakın ve [Discord Topluluk Sunucumuzdaki](https://discord.gg/8Tpq4AcN9c) `global-users` kanalında bize bir yorum bırakın.
@@ -175,7 +177,7 @@ Aynı zamanda, lütfen Dify'ı sosyal medyada, etkinliklerde ve konferanslarda p
## Topluluk & iletişim

- [GitHub Tartışmaları](https://github.com/langgenius/dify/discussions). En uygun: geri bildirim paylaşmak ve soru sormak için.
- [GitHub Sorunları](https://github.com/langgenius/dify/issues). En uygun: Dify.AI kullanırken karşılaştığınız hatalar ve özellik önerileri için. [Katkı Kılavuzumuza](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) bakın.
- [GitHub Sorunları](https://github.com/langgenius/dify/issues). En uygun: Dify.AI kullanırken karşılaştığınız hatalar ve özellik önerileri için. [Katkı Kılavuzumuza](./CONTRIBUTING.md) bakın.
- [Discord](https://discord.gg/FngNHpbcY7). En uygun: uygulamalarınızı paylaşmak ve toplulukla vakit geçirmek için.
- [X(Twitter)](https://twitter.com/dify_ai). En uygun: uygulamalarınızı paylaşmak ve toplulukla vakit geçirmek için.

@@ -189,4 +191,4 @@ Gizliliğinizi korumak için, lütfen güvenlik sorunlarını GitHub'da paylaşm

## Lisans

Bu depo, temel olarak Apache 2.0 lisansı ve birkaç ek kısıtlama içeren [Dify Açık Kaynak Lisansı](../LICENSE) altında kullanıma sunulmuştur.
Bu depo, temel olarak Apache 2.0 lisansı ve birkaç ek kısıtlama içeren [Dify Açık Kaynak Lisansı](../../LICENSE) altında kullanıma sunulmuştur.
@@ -6,7 +6,7 @@ Chúng tôi cần phải nhanh nhẹn và triển khai nhanh chóng, nhưng cũn

Hướng dẫn này, giống như Dify, đang được phát triển liên tục. Chúng tôi rất cảm kích sự thông cảm của bạn nếu đôi khi nó chưa theo kịp dự án thực tế, và hoan nghênh mọi phản hồi để cải thiện.

Về giấy phép, vui lòng dành chút thời gian đọc [Thỏa thuận Cấp phép và Người đóng góp](../LICENSE) ngắn gọn của chúng tôi. Cộng đồng cũng tuân theo [quy tắc ứng xử](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).
Về giấy phép, vui lòng dành chút thời gian đọc [Thỏa thuận Cấp phép và Người đóng góp](../../LICENSE) ngắn gọn của chúng tôi. Cộng đồng cũng tuân theo [quy tắc ứng xử](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md).

## Trước khi bắt đầu

@@ -1,4 +1,4 @@



<p align="center">
<a href="https://cloud.dify.ai">Dify Cloud</a> ·
@@ -35,17 +35,19 @@
</p>

<p align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</p>

Dify là một nền tảng phát triển ứng dụng LLM mã nguồn mở. Giao diện trực quan kết hợp quy trình làm việc AI, mô hình RAG, khả năng tác nhân, quản lý mô hình, tính năng quan sát và hơn thế nữa, cho phép bạn nhanh chóng chuyển từ nguyên mẫu sang sản phẩm. Đây là danh sách các tính năng cốt lõi:
@@ -103,7 +105,7 @@ Yêu thích Dify trên GitHub và được thông báo ngay lập tức về cá

</br>

Cách dễ nhất để khởi động máy chủ Dify là chạy tệp [docker-compose.yml](docker/docker-compose.yaml) của chúng tôi. Trước khi chạy lệnh cài đặt, hãy đảm bảo rằng [Docker](https://docs.docker.com/get-docker/) và [Docker Compose](https://docs.docker.com/compose/install/) đã được cài đặt trên máy của bạn:
Cách dễ nhất để khởi động máy chủ Dify là chạy tệp [docker-compose.yml](../../docker/docker-compose.yaml) của chúng tôi. Trước khi chạy lệnh cài đặt, hãy đảm bảo rằng [Docker](https://docs.docker.com/get-docker/) và [Docker Compose](https://docs.docker.com/compose/install/) đã được cài đặt trên máy của bạn:

```bash
cd docker
@@ -117,7 +119,7 @@ Sau khi chạy, bạn có thể truy cập bảng điều khiển Dify trong tr

## Các bước tiếp theo

Nếu bạn cần tùy chỉnh cấu hình, vui lòng tham khảo các nhận xét trong tệp [.env.example](docker/.env.example) của chúng tôi và cập nhật các giá trị tương ứng trong tệp `.env` của bạn. Ngoài ra, bạn có thể cần điều chỉnh tệp `docker-compose.yaml`, chẳng hạn như thay đổi phiên bản hình ảnh, ánh xạ cổng hoặc gắn kết khối lượng, dựa trên môi trường triển khai cụ thể và yêu cầu của bạn. Sau khi thực hiện bất kỳ thay đổi nào, vui lòng chạy lại `docker-compose up -d`. Bạn có thể tìm thấy danh sách đầy đủ các biến môi trường có sẵn [tại đây](https://docs.dify.ai/getting-started/install-self-hosted/environments).
Nếu bạn cần tùy chỉnh cấu hình, vui lòng tham khảo các nhận xét trong tệp [.env.example](../../docker/.env.example) của chúng tôi và cập nhật các giá trị tương ứng trong tệp `.env` của bạn. Ngoài ra, bạn có thể cần điều chỉnh tệp `docker-compose.yaml`, chẳng hạn như thay đổi phiên bản hình ảnh, ánh xạ cổng hoặc gắn kết khối lượng, dựa trên môi trường triển khai cụ thể và yêu cầu của bạn. Sau khi thực hiện bất kỳ thay đổi nào, vui lòng chạy lại `docker-compose up -d`. Bạn có thể tìm thấy danh sách đầy đủ các biến môi trường có sẵn [tại đây](https://docs.dify.ai/getting-started/install-self-hosted/environments).

Nếu bạn muốn cấu hình một cài đặt có độ sẵn sàng cao, có các [Helm Charts](https://helm.sh/) và tệp YAML do cộng đồng đóng góp cho phép Dify được triển khai trên Kubernetes.

@@ -162,7 +164,7 @@ Triển khai Dify lên AKS chỉ với một cú nhấp chuột bằng [Azure De

## Đóng góp

Đối với những người muốn đóng góp mã, xem [Hướng dẫn Đóng góp](https://github.com/langgenius/dify/blob/main/CONTRIBUTING/CONTRIBUTING_VI.md) của chúng tôi.
Đối với những người muốn đóng góp mã, xem [Hướng dẫn Đóng góp](./CONTRIBUTING.md) của chúng tôi.
Đồng thời, vui lòng xem xét hỗ trợ Dify bằng cách chia sẻ nó trên mạng xã hội và tại các sự kiện và hội nghị.

> Chúng tôi đang tìm kiếm người đóng góp để giúp dịch Dify sang các ngôn ngữ khác ngoài tiếng Trung hoặc tiếng Anh. Nếu bạn quan tâm đến việc giúp đỡ, vui lòng xem [README i18n](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) để biết thêm thông tin và để lại bình luận cho chúng tôi trong kênh `global-users` của [Máy chủ Cộng đồng Discord](https://discord.gg/8Tpq4AcN9c) của chúng tôi.
@@ -176,7 +178,7 @@ Triển khai Dify lên AKS chỉ với một cú nhấp chuột bằng [Azure De
## Cộng đồng & liên hệ

- [Thảo luận GitHub](https://github.com/langgenius/dify/discussions). Tốt nhất cho: chia sẻ phản hồi và đặt câu hỏi.
- [Vấn đề GitHub](https://github.com/langgenius/dify/issues). Tốt nhất cho: lỗi bạn gặp phải khi sử dụng Dify.AI và đề xuất tính năng. Xem [Hướng dẫn Đóng góp](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md) của chúng tôi.
- [Vấn đề GitHub](https://github.com/langgenius/dify/issues). Tốt nhất cho: lỗi bạn gặp phải khi sử dụng Dify.AI và đề xuất tính năng. Xem [Hướng dẫn Đóng góp](./CONTRIBUTING.md) của chúng tôi.
- [Discord](https://discord.gg/FngNHpbcY7). Tốt nhất cho: chia sẻ ứng dụng của bạn và giao lưu với cộng đồng.
- [X(Twitter)](https://twitter.com/dify_ai). Tốt nhất cho: chia sẻ ứng dụng của bạn và giao lưu với cộng đồng.

@@ -190,4 +192,4 @@ Triển khai Dify lên AKS chỉ với một cú nhấp chuột bằng [Azure De

## Giấy phép

Kho lưu trữ này có sẵn theo [Giấy phép Mã nguồn Mở Dify](../LICENSE), về cơ bản là Apache 2.0 với một vài hạn chế bổ sung.
Kho lưu trữ này có sẵn theo [Giấy phép Mã nguồn Mở Dify](../../LICENSE), về cơ bản là Apache 2.0 với một vài hạn chế bổ sung.
@@ -6,7 +6,7 @@

本指南和 Dify 一样在不断完善中。如果有任何滞后于项目实际情况的地方,恳请谅解,我们也欢迎任何改进建议。

关于许可证,请花一分钟阅读我们简短的[许可和贡献者协议](../LICENSE)。同时也请遵循社区[行为准则](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)。
关于许可证,请花一分钟阅读我们简短的[许可和贡献者协议](../../LICENSE)。同时也请遵循社区[行为准则](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)。

## 开始之前

@@ -1,4 +1,4 @@



<div align="center">
<a href="https://cloud.dify.ai">Dify 云服务</a> ·
@@ -35,17 +35,19 @@
</p>

<div align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_BN.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../bn-BD/README.md"><img alt="README in বাংলা" src="https://img.shields.io/badge/বাংলা-d9d9d9"></a>
</div>

#
@@ -111,7 +113,7 @@ Dify 是一个开源的 LLM 应用开发平台。其直观的界面结合了 AI

### 快速启动

启动 Dify 服务器的最简单方法是运行我们的 [docker-compose.yml](docker/docker-compose.yaml) 文件。在运行安装命令之前,请确保您的机器上安装了 [Docker](https://docs.docker.com/get-docker/) 和 [Docker Compose](https://docs.docker.com/compose/install/):
启动 Dify 服务器的最简单方法是运行我们的 [docker-compose.yml](../../docker/docker-compose.yaml) 文件。在运行安装命令之前,请确保您的机器上安装了 [Docker](https://docs.docker.com/get-docker/) 和 [Docker Compose](https://docs.docker.com/compose/install/):

```bash
cd docker
@@ -123,7 +125,7 @@ docker compose up -d

### 自定义配置

如果您需要自定义配置,请参考 [.env.example](docker/.env.example) 文件中的注释,并更新 `.env` 文件中对应的值。此外,您可能需要根据您的具体部署环境和需求对 `docker-compose.yaml` 文件本身进行调整,例如更改镜像版本、端口映射或卷挂载。完成任何更改后,请重新运行 `docker-compose up -d`。您可以在[此处](https://docs.dify.ai/getting-started/install-self-hosted/environments)找到可用环境变量的完整列表。
如果您需要自定义配置,请参考 [.env.example](../../docker/.env.example) 文件中的注释,并更新 `.env` 文件中对应的值。此外,您可能需要根据您的具体部署环境和需求对 `docker-compose.yaml` 文件本身进行调整,例如更改镜像版本、端口映射或卷挂载。完成任何更改后,请重新运行 `docker-compose up -d`。您可以在[此处](https://docs.dify.ai/getting-started/install-self-hosted/environments)找到可用环境变量的完整列表。

#### 使用 Helm Chart 或 Kubernetes 资源清单(YAML)部署

@@ -180,7 +182,7 @@ docker compose up -d

## Contributing

对于那些想要贡献代码的人,请参阅我们的[贡献指南](https://github.com/langgenius/dify/blob/main/CONTRIBUTING/CONTRIBUTING_CN.md)。
对于那些想要贡献代码的人,请参阅我们的[贡献指南](./CONTRIBUTING.md)。
同时,请考虑通过社交媒体、活动和会议来支持 Dify 的分享。

> 我们正在寻找贡献者来帮助将 Dify 翻译成除了中文和英文之外的其他语言。如果您有兴趣帮助,请参阅我们的[i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md)获取更多信息,并在我们的[Discord 社区服务器](https://discord.gg/8Tpq4AcN9c)的`global-users`频道中留言。
@@ -196,7 +198,7 @@ docker compose up -d
我们欢迎您为 Dify 做出贡献,以帮助改善 Dify。包括:提交代码、问题、新想法,或分享您基于 Dify 创建的有趣且有用的 AI 应用程序。同时,我们也欢迎您在不同的活动、会议和社交媒体上分享 Dify。

- [GitHub Discussion](https://github.com/langgenius/dify/discussions). 👉:分享您的应用程序并与社区交流。
- [GitHub Issues](https://github.com/langgenius/dify/issues)。👉:使用 Dify.AI 时遇到的错误和问题,请参阅[贡献指南](../CONTRIBUTING.md)。
- [GitHub Issues](https://github.com/langgenius/dify/issues)。👉:使用 Dify.AI 时遇到的错误和问题,请参阅[贡献指南](./CONTRIBUTING.md)。
- [电子邮件支持](mailto:hello@dify.ai?subject=%5BGitHub%5DQuestions%20About%20Dify)。👉:关于使用 Dify.AI 的问题。
- [Discord](https://discord.gg/FngNHpbcY7)。👉:分享您的应用程序并与社区交流。
- [X(Twitter)](https://twitter.com/dify_ai)。👉:分享您的应用程序并与社区交流。
@@ -208,4 +210,4 @@ docker compose up -d

## License

本仓库遵循 [Dify Open Source License](../LICENSE) 开源协议,该许可证本质上是 Apache 2.0,但有一些额外的限制。
本仓库遵循 [Dify Open Source License](../../LICENSE) 开源协议,该许可证本质上是 Apache 2.0,但有一些额外的限制。
@@ -6,7 +6,7 @@

這份指南與 Dify 一樣,都在持續完善中。如果指南內容有落後於實際專案的情況,還請見諒,也歡迎提供改進建議。

關於授權部分,請花點時間閱讀我們簡短的[授權和貢獻者協議](../LICENSE)。社群也需遵守[行為準則](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)。
關於授權部分,請花點時間閱讀我們簡短的[授權和貢獻者協議](../../LICENSE)。社群也需遵守[行為準則](https://github.com/langgenius/.github/blob/main/CODE_OF_CONDUCT.md)。

## 開始之前

@@ -1,4 +1,4 @@



<p align="center">
📌 <a href="https://dify.ai/blog/introducing-dify-workflow-file-upload-a-demo-on-ai-podcast">介紹 Dify 工作流程檔案上傳功能:重現 Google NotebookLM Podcast</a>
@@ -39,18 +39,18 @@
</p>

<p align="center">
<a href="../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="./README_TW.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="./README_CN.md"><img alt="简体中文版自述文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="./README_JA.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="./README_ES.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="./README_FR.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="./README_KL.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="./README_KR.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="./README_AR.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="./README_TR.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="./README_VI.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="./README_DE.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
<a href="../../README.md"><img alt="README in English" src="https://img.shields.io/badge/English-d9d9d9"></a>
<a href="../zh-TW/README.md"><img alt="繁體中文文件" src="https://img.shields.io/badge/繁體中文-d9d9d9"></a>
<a href="../zh-CN/README.md"><img alt="简体中文文件" src="https://img.shields.io/badge/简体中文-d9d9d9"></a>
<a href="../ja-JP/README.md"><img alt="日本語のREADME" src="https://img.shields.io/badge/日本語-d9d9d9"></a>
<a href="../es-ES/README.md"><img alt="README en Español" src="https://img.shields.io/badge/Español-d9d9d9"></a>
<a href="../fr-FR/README.md"><img alt="README en Français" src="https://img.shields.io/badge/Français-d9d9d9"></a>
<a href="../tlh/README.md"><img alt="README tlhIngan Hol" src="https://img.shields.io/badge/Klingon-d9d9d9"></a>
<a href="../ko-KR/README.md"><img alt="README in Korean" src="https://img.shields.io/badge/한국어-d9d9d9"></a>
<a href="../ar-SA/README.md"><img alt="README بالعربية" src="https://img.shields.io/badge/العربية-d9d9d9"></a>
<a href="../tr-TR/README.md"><img alt="Türkçe README" src="https://img.shields.io/badge/Türkçe-d9d9d9"></a>
<a href="../vi-VN/README.md"><img alt="README Tiếng Việt" src="https://img.shields.io/badge/Ti%E1%BA%BFng%20Vi%E1%BB%87t-d9d9d9"></a>
<a href="../de-DE/README.md"><img alt="README in Deutsch" src="https://img.shields.io/badge/German-d9d9d9"></a>
</p>

Dify 是一個開源的 LLM 應用程式開發平台。其直觀的界面結合了智能代理工作流程、RAG 管道、代理功能、模型管理、可觀察性功能等,讓您能夠快速從原型進展到生產環境。
@@ -64,7 +64,7 @@ Dify 是一個開源的 LLM 應用程式開發平台。其直觀的界面結合

</br>

啟動 Dify 伺服器最簡單的方式是透過 [docker compose](docker/docker-compose.yaml)。在使用以下命令運行 Dify 之前,請確保您的機器已安裝 [Docker](https://docs.docker.com/get-docker/) 和 [Docker Compose](https://docs.docker.com/compose/install/):
啟動 Dify 伺服器最簡單的方式是透過 [docker compose](../../docker/docker-compose.yaml)。在使用以下命令運行 Dify 之前,請確保您的機器已安裝 [Docker](https://docs.docker.com/get-docker/) 和 [Docker Compose](https://docs.docker.com/compose/install/):

```bash
cd dify
@@ -128,7 +128,7 @@ Dify 的所有功能都提供相應的 API,因此您可以輕鬆地將 Dify

## 進階設定

如果您需要自定義配置,請參考我們的 [.env.example](docker/.env.example) 文件中的註釋,並在您的 `.env` 文件中更新相應的值。此外,根據您特定的部署環境和需求,您可能需要調整 `docker-compose.yaml` 文件本身,例如更改映像版本、端口映射或卷掛載。進行任何更改後,請重新運行 `docker-compose up -d`。您可以在[這裡](https://docs.dify.ai/getting-started/install-self-hosted/environments)找到可用環境變數的完整列表。
如果您需要自定義配置,請參考我們的 [.env.example](../../docker/.env.example) 文件中的註釋,並在您的 `.env` 文件中更新相應的值。此外,根據您特定的部署環境和需求,您可能需要調整 `docker-compose.yaml` 文件本身,例如更改映像版本、端口映射或卷掛載。進行任何更改後,請重新運行 `docker-compose up -d`。您可以在[這裡](https://docs.dify.ai/getting-started/install-self-hosted/environments)找到可用環境變數的完整列表。

如果您想配置高可用性設置,社區貢獻的 [Helm Charts](https://helm.sh/) 和 Kubernetes 資源清單(YAML)允許在 Kubernetes 上部署 Dify。

@@ -173,7 +173,7 @@ Dify 的所有功能都提供相應的 API,因此您可以輕鬆地將 Dify

## 貢獻

對於想要貢獻程式碼的開發者,請參閱我們的[貢獻指南](https://github.com/langgenius/dify/blob/main/CONTRIBUTING/CONTRIBUTING_TW.md)。
對於想要貢獻程式碼的開發者,請參閱我們的[貢獻指南](./CONTRIBUTING.md)。
同時,也請考慮透過在社群媒體和各種活動與會議上分享 Dify 來支持我們。

> 我們正在尋找貢獻者協助將 Dify 翻譯成中文和英文以外的語言。如果您有興趣幫忙,請查看 [i18n README](https://github.com/langgenius/dify/blob/main/web/i18n-config/README.md) 獲取更多資訊,並在我們的 [Discord 社群伺服器](https://discord.gg/8Tpq4AcN9c) 的 `global-users` 頻道留言給我們。
@@ -181,7 +181,7 @@ Dify 的所有功能都提供相應的 API,因此您可以輕鬆地將 Dify
## 社群與聯絡方式

- [GitHub Discussion](https://github.com/langgenius/dify/discussions):最適合分享反饋和提問。
- [GitHub Issues](https://github.com/langgenius/dify/issues):最適合報告使用 Dify.AI 時遇到的問題和提出功能建議。請參閱我們的[貢獻指南](https://github.com/langgenius/dify/blob/main/CONTRIBUTING.md)。
- [GitHub Issues](https://github.com/langgenius/dify/issues):最適合報告使用 Dify.AI 時遇到的問題和提出功能建議。請參閱我們的[貢獻指南](./CONTRIBUTING.md)。
- [Discord](https://discord.gg/FngNHpbcY7):最適合分享您的應用程式並與社群互動。
- [X(Twitter)](https://twitter.com/dify_ai):最適合分享您的應用程式並與社群互動。

@@ -201,4 +201,4 @@ Dify 的所有功能都提供相應的 API,因此您可以輕鬆地將 Dify

## 授權條款

本代碼庫採用 [Dify 開源授權](../LICENSE),這基本上是 Apache 2.0 授權加上一些額外限制條款。
本代碼庫採用 [Dify 開源授權](../../LICENSE),這基本上是 Apache 2.0 授權加上一些額外限制條款。
@@ -4,6 +4,7 @@ from dify_client.client import (
    DifyClient,
    KnowledgeBaseClient,
    WorkflowClient,
    WorkspaceClient,
)

__all__ = [
@@ -12,4 +13,5 @@ __all__ = [
    "DifyClient",
    "KnowledgeBaseClient",
    "WorkflowClient",
    "WorkspaceClient",
]
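With `WorkspaceClient` now exported alongside the other clients, everything can be imported from the package root. A minimal sketch of the updated exports in use (the API key and dataset ID are hypothetical placeholders, and the constructors are assumed to take the key with an optional `base_url` for self-hosted installs):

```python
# Minimal import sketch for the updated exports (placeholder values; the
# default base_url is assumed to point at Dify Cloud and can be overridden).
from dify_client import DifyClient, KnowledgeBaseClient, WorkflowClient, WorkspaceClient

API_KEY = "your-app-api-key"  # hypothetical placeholder

client = DifyClient(API_KEY)
workflow_client = WorkflowClient(API_KEY)
workspace_client = WorkspaceClient(API_KEY)
kb_client = KnowledgeBaseClient(API_KEY, dataset_id="your-dataset-id")  # hypothetical ID
```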
@@ -1,5 +1,6 @@
import json
from typing import IO, Literal
from typing import Literal, Union, Dict, List, Any, Optional, IO

import requests


@@ -49,6 +50,18 @@ class DifyClient:
        params = {"user": user}
        return self._send_request("GET", "/meta", params=params)

    def get_app_info(self):
        """Get basic application information including name, description, tags, and mode."""
        return self._send_request("GET", "/info")

    def get_app_site_info(self):
        """Get application site information."""
        return self._send_request("GET", "/site")

    def get_file_preview(self, file_id: str):
        """Get file preview by file ID."""
        return self._send_request("GET", f"/files/{file_id}/preview")


class CompletionClient(DifyClient):
    def create_completion_message(
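The three new `DifyClient` helpers map directly onto the `/info`, `/site`, and `/files/{file_id}/preview` endpoints. A usage sketch, assuming an API key in `DIFY_API_KEY` and that `_send_request` returns a plain `requests` response object, so `.json()` decodes the payload:

```python
# Usage sketch for the new app-info helpers; the file ID is a hypothetical
# placeholder, and the response fields follow the docstrings above.
import os

from dify_client import DifyClient

client = DifyClient(os.environ["DIFY_API_KEY"])

info = client.get_app_info().json()       # name, description, tags, mode
site = client.get_app_site_info().json()  # WebApp/site settings
print(info.get("name"), info.get("mode"))

preview = client.get_file_preview("your-file-id")  # raw response; bytes in preview.content
```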
@@ -144,6 +157,51 @@ class ChatClient(DifyClient):
        files = {"file": audio_file}
        return self._send_request_with_files("POST", "/audio-to-text", data, files)

    # Annotation APIs
    def annotation_reply_action(
        self,
        action: Literal["enable", "disable"],
        score_threshold: float,
        embedding_provider_name: str,
        embedding_model_name: str,
    ):
        """Enable or disable annotation reply feature."""
        # Backend API requires these fields to be non-None values
        if score_threshold is None or embedding_provider_name is None or embedding_model_name is None:
            raise ValueError("score_threshold, embedding_provider_name, and embedding_model_name cannot be None")

        data = {
            "score_threshold": score_threshold,
            "embedding_provider_name": embedding_provider_name,
            "embedding_model_name": embedding_model_name,
        }
        return self._send_request("POST", f"/apps/annotation-reply/{action}", json=data)

    def get_annotation_reply_status(self, action: Literal["enable", "disable"], job_id: str):
        """Get the status of an annotation reply action job."""
        return self._send_request("GET", f"/apps/annotation-reply/{action}/status/{job_id}")

    def list_annotations(self, page: int = 1, limit: int = 20, keyword: str = ""):
        """List annotations for the application."""
        params = {"page": page, "limit": limit}
        if keyword:
            params["keyword"] = keyword
        return self._send_request("GET", "/apps/annotations", params=params)

    def create_annotation(self, question: str, answer: str):
        """Create a new annotation."""
        data = {"question": question, "answer": answer}
        return self._send_request("POST", "/apps/annotations", json=data)

    def update_annotation(self, annotation_id: str, question: str, answer: str):
        """Update an existing annotation."""
        data = {"question": question, "answer": answer}
        return self._send_request("PUT", f"/apps/annotations/{annotation_id}", json=data)

    def delete_annotation(self, annotation_id: str):
        """Delete an annotation."""
        return self._send_request("DELETE", f"/apps/annotations/{annotation_id}")


class WorkflowClient(DifyClient):
    def run(self, inputs: dict, response_mode: Literal["blocking", "streaming"] = "streaming", user: str = "abc-123"):
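A sketch of the annotation lifecycle these additions cover, from plain CRUD to toggling the annotation-reply feature. The response field names (`id`, `job_id`) follow the Dify annotation API but should be treated as illustrative, and the embedding provider and model must already be configured in the workspace:

```python
# Annotation API sketch (assumes DIFY_API_KEY is set; all values illustrative).
import os

from dify_client import ChatClient

chat = ChatClient(os.environ["DIFY_API_KEY"])

# CRUD on annotations.
created = chat.create_annotation(question="What is Dify?", answer="An LLM app platform.").json()
annotation_id = created["id"]  # assumption: the create response echoes an "id"
chat.update_annotation(annotation_id, question="What is Dify?", answer="An open-source LLM app platform.")
print(chat.list_annotations(page=1, limit=20, keyword="Dify").json())
chat.delete_annotation(annotation_id)

# Enabling annotation reply runs as a background job, so poll its status.
job = chat.annotation_reply_action(
    action="enable",
    score_threshold=0.8,
    embedding_provider_name="openai",               # assumption: a configured provider
    embedding_model_name="text-embedding-3-small",  # assumption: a configured model
).json()
status = chat.get_annotation_reply_status("enable", job["job_id"]).json()  # assumption: "job_id" field
```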
@@ -157,6 +215,55 @@ class WorkflowClient(DifyClient):
    def get_result(self, workflow_run_id):
        return self._send_request("GET", f"/workflows/run/{workflow_run_id}")

    def get_workflow_logs(
        self,
        keyword: str = None,
        status: Literal["succeeded", "failed", "stopped"] | None = None,
        page: int = 1,
        limit: int = 20,
        created_at__before: str = None,
        created_at__after: str = None,
        created_by_end_user_session_id: str = None,
        created_by_account: str = None,
    ):
        """Get workflow execution logs with optional filtering."""
        params = {"page": page, "limit": limit}
        if keyword:
            params["keyword"] = keyword
        if status:
            params["status"] = status
        if created_at__before:
            params["created_at__before"] = created_at__before
        if created_at__after:
            params["created_at__after"] = created_at__after
        if created_by_end_user_session_id:
            params["created_by_end_user_session_id"] = created_by_end_user_session_id
        if created_by_account:
            params["created_by_account"] = created_by_account
        return self._send_request("GET", "/workflows/logs", params=params)

    def run_specific_workflow(
        self,
        workflow_id: str,
        inputs: dict,
        response_mode: Literal["blocking", "streaming"] = "streaming",
        user: str = "abc-123",
    ):
        """Run a specific workflow by workflow ID."""
        data = {"inputs": inputs, "response_mode": response_mode, "user": user}
        return self._send_request(
            "POST", f"/workflows/{workflow_id}/run", data, stream=True if response_mode == "streaming" else False
        )


class WorkspaceClient(DifyClient):
    """Client for workspace-related operations."""

    def get_available_models(self, model_type: str):
        """Get available models by model type."""
        url = f"/workspaces/current/models/model-types/{model_type}"
        return self._send_request("GET", url)


class KnowledgeBaseClient(DifyClient):
    def __init__(
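Together, the `WorkflowClient` additions and the new `WorkspaceClient` support log triage, targeted runs, and model discovery. A sketch under the same assumptions as above (API key in the environment, illustrative filter values, ISO 8601 timestamps):

```python
# Workflow-log filtering, targeted runs, and model discovery (illustrative values).
import os

from dify_client import WorkflowClient, WorkspaceClient

workflow = WorkflowClient(os.environ["DIFY_API_KEY"])

# Only non-empty filters are added to the query string by get_workflow_logs.
failed_runs = workflow.get_workflow_logs(
    status="failed",
    page=1,
    limit=10,
    created_at__after="2024-01-01T00:00:00Z",  # assumption: ISO 8601 timestamps
).json()

# Run a published workflow by ID; streaming mode returns an iterable response.
run = workflow.run_specific_workflow(
    workflow_id="your-workflow-id",  # hypothetical ID
    inputs={"query": "hello"},
    response_mode="streaming",
    user="user-123",
)
for line in run.iter_lines(decode_unicode=True):
    if line:
        print(line)

workspace = WorkspaceClient(os.environ["DIFY_API_KEY"])
llms = workspace.get_available_models("llm").json()
```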
@@ -443,3 +550,117 @@ class KnowledgeBaseClient(DifyClient):
        data = {"segment": segment_data}
        url = f"/datasets/{self._get_dataset_id()}/documents/{document_id}/segments/{segment_id}"
        return self._send_request("POST", url, json=data, **kwargs)

    # Advanced Knowledge Base APIs
    def hit_testing(
        self, query: str, retrieval_model: Dict[str, Any] = None, external_retrieval_model: Dict[str, Any] = None
    ):
        """Perform hit testing on the dataset."""
        data = {"query": query}
        if retrieval_model:
            data["retrieval_model"] = retrieval_model
        if external_retrieval_model:
            data["external_retrieval_model"] = external_retrieval_model
        url = f"/datasets/{self._get_dataset_id()}/hit-testing"
        return self._send_request("POST", url, json=data)

    def get_dataset_metadata(self):
        """Get dataset metadata."""
        url = f"/datasets/{self._get_dataset_id()}/metadata"
        return self._send_request("GET", url)

    def create_dataset_metadata(self, metadata_data: Dict[str, Any]):
        """Create dataset metadata."""
        url = f"/datasets/{self._get_dataset_id()}/metadata"
        return self._send_request("POST", url, json=metadata_data)

    def update_dataset_metadata(self, metadata_id: str, metadata_data: Dict[str, Any]):
        """Update dataset metadata."""
        url = f"/datasets/{self._get_dataset_id()}/metadata/{metadata_id}"
        return self._send_request("PATCH", url, json=metadata_data)

    def get_built_in_metadata(self):
        """Get built-in metadata."""
        url = f"/datasets/{self._get_dataset_id()}/metadata/built-in"
        return self._send_request("GET", url)

    def manage_built_in_metadata(self, action: str, metadata_data: Dict[str, Any] = None):
        """Manage built-in metadata with specified action."""
        data = metadata_data or {}
        url = f"/datasets/{self._get_dataset_id()}/metadata/built-in/{action}"
        return self._send_request("POST", url, json=data)

    def update_documents_metadata(self, operation_data: List[Dict[str, Any]]):
        """Update metadata for multiple documents."""
        url = f"/datasets/{self._get_dataset_id()}/documents/metadata"
        data = {"operation_data": operation_data}
        return self._send_request("POST", url, json=data)

    # Dataset Tags APIs
    def list_dataset_tags(self):
        """List all dataset tags."""
        return self._send_request("GET", "/datasets/tags")

    def bind_dataset_tags(self, tag_ids: List[str]):
        """Bind tags to dataset."""
        data = {"tag_ids": tag_ids, "target_id": self._get_dataset_id()}
        return self._send_request("POST", "/datasets/tags/binding", json=data)

    def unbind_dataset_tag(self, tag_id: str):
        """Unbind a single tag from dataset."""
        data = {"tag_id": tag_id, "target_id": self._get_dataset_id()}
        return self._send_request("POST", "/datasets/tags/unbinding", json=data)

    def get_dataset_tags(self):
        """Get tags for current dataset."""
        url = f"/datasets/{self._get_dataset_id()}/tags"
        return self._send_request("GET", url)

    # RAG Pipeline APIs
    def get_datasource_plugins(self, is_published: bool = True):
        """Get datasource plugins for RAG pipeline."""
        params = {"is_published": is_published}
        url = f"/datasets/{self._get_dataset_id()}/pipeline/datasource-plugins"
        return self._send_request("GET", url, params=params)

    def run_datasource_node(
        self,
        node_id: str,
        inputs: Dict[str, Any],
        datasource_type: str,
        is_published: bool = True,
        credential_id: str = None,
    ):
        """Run a datasource node in RAG pipeline."""
        data = {"inputs": inputs, "datasource_type": datasource_type, "is_published": is_published}
        if credential_id:
            data["credential_id"] = credential_id
        url = f"/datasets/{self._get_dataset_id()}/pipeline/datasource/nodes/{node_id}/run"
        return self._send_request("POST", url, json=data, stream=True)

    def run_rag_pipeline(
        self,
        inputs: Dict[str, Any],
        datasource_type: str,
        datasource_info_list: List[Dict[str, Any]],
        start_node_id: str,
        is_published: bool = True,
        response_mode: Literal["streaming", "blocking"] = "blocking",
    ):
        """Run RAG pipeline."""
        data = {
            "inputs": inputs,
            "datasource_type": datasource_type,
            "datasource_info_list": datasource_info_list,
            "start_node_id": start_node_id,
            "is_published": is_published,
            "response_mode": response_mode,
        }
        url = f"/datasets/{self._get_dataset_id()}/pipeline/run"
        return self._send_request("POST", url, json=data, stream=response_mode == "streaming")

    def upload_pipeline_file(self, file_path: str):
        """Upload file for RAG pipeline."""
        with open(file_path, "rb") as f:
            files = {"file": f}
            return self._send_request_with_files("POST", "/datasets/pipeline/file-upload", {}, files)
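A sketch that ties the advanced knowledge-base additions together: hit testing, document metadata, and tags. The payload shapes (a `type`/`name` pair for a metadata field, `operation_data` entries carrying a `metadata_list`) follow the method signatures above and the Dify dataset API, while every ID is a placeholder:

```python
# Knowledge-base API sketch (assumes a dataset-scoped DIFY_API_KEY; IDs are
# hypothetical placeholders and payload shapes are illustrative).
import os

from dify_client import KnowledgeBaseClient

kb = KnowledgeBaseClient(os.environ["DIFY_API_KEY"], dataset_id="your-dataset-id")

# Retrieval hit testing against the dataset.
hits = kb.hit_testing(query="How do I deploy Dify?").json()

# Metadata: create a field, then attach a value to a document.
field = kb.create_dataset_metadata({"type": "string", "name": "source"}).json()
kb.update_documents_metadata([
    {
        "document_id": "your-document-id",
        "metadata_list": [{"id": field["id"], "name": "source", "value": "manual"}],
    }
])

# Tags: bind existing tag IDs to this dataset, then read them back.
kb.bind_dataset_tags(["tag-id-1", "tag-id-2"])
tags = kb.get_dataset_tags().json()
```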