feat: mypy for all type check (#10921)

yihong
2024-12-24 18:38:51 +08:00
committed by GitHub
parent c91e8b1737
commit 56e15d09a9
584 changed files with 3975 additions and 2826 deletions

api/tasks/__init__.py (new empty file)

@@ -3,7 +3,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from werkzeug.exceptions import NotFound
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
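
The pattern above repeats across almost every task module in this commit: celery ships without a py.typed marker or bundled stubs, so strict mypy flags each `from celery import shared_task` with an import-untyped error, and the inline `# type: ignore` silences it at the import site. A minimal sketch of the two suppression styles; the pyproject.toml fragment is an assumed alternative, not something this commit adds:

from celery import shared_task  # type: ignore  # celery has no py.typed marker or stubs

# The same suppression can be centralized in mypy config instead of repeated
# at every import site (hypothetical pyproject.toml fragment, as comments):
#
#   [[tool.mypy.overrides]]
#   module = "celery.*"
#   ignore_missing_imports = true

@shared_task(queue="dataset")
def ping() -> str:
    # The decorator is typed as Any after the ignore, so the body still checks.
    return "pong"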

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.models.document import Document

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from werkzeug.exceptions import NotFound
from core.rag.datasource.vdb.vector_factory import Vector

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from core.rag.datasource.vdb.vector_factory import Vector
from models.dataset import Dataset

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from werkzeug.exceptions import NotFound
from core.rag.datasource.vdb.vector_factory import Vector

@@ -3,7 +3,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from werkzeug.exceptions import NotFound
from core.rag.datasource.vdb.vector_factory import Vector

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from core.rag.datasource.vdb.vector_factory import Vector
from core.rag.models.document import Document

@@ -4,7 +4,7 @@ import time
import uuid
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from sqlalchemy import func
from core.indexing_runner import IndexingRunner
@@ -58,12 +58,13 @@ def batch_create_segment_to_index_task(
                 model=dataset.embedding_model,
             )
         word_count_change = 0
+        segments_to_insert: list[str] = []  # Explicitly type hint the list as List[str]
         for segment in content:
-            content = segment["content"]
+            content_str = segment["content"]
             doc_id = str(uuid.uuid4())
-            segment_hash = helper.generate_text_hash(content)
+            segment_hash = helper.generate_text_hash(content_str)
             # calc embedding use tokens
-            tokens = embedding_model.get_text_embedding_num_tokens(texts=[content]) if embedding_model else 0
+            tokens = embedding_model.get_text_embedding_num_tokens(texts=[content_str]) if embedding_model else 0
             max_position = (
                 db.session.query(func.max(DocumentSegment.position))
                 .filter(DocumentSegment.document_id == dataset_document.id)
@@ -90,6 +91,7 @@ def batch_create_segment_to_index_task(
             word_count_change += segment_document.word_count
             db.session.add(segment_document)
             document_segments.append(segment_document)
+            segments_to_insert.append(str(segment))  # Cast to string if needed
         # update document word count
         dataset_document.word_count += word_count_change
         db.session.add(dataset_document)
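
Two recurring strict-mypy fixes appear in this hunk: an empty list needs an explicit annotation (otherwise mypy reports "Need type annotation"), and the loop can no longer rebind the name `content`, since assigning a str to a parameter annotated as a list is an incompatible assignment. A standalone sketch; `process` and its types are illustrative, not the task's real signature:

def process(content: list[dict[str, str]]) -> list[str]:
    segments_to_insert: list[str] = []  # a bare `= []` fails: Need type annotation
    for segment in content:
        # Rebinding the parameter name would fail:
        #   content = segment["content"]
        #   error: Incompatible types in assignment (expression has type "str",
        #          variable has type "list[dict[str, str]]")
        content_str = segment["content"]  # a fresh name keeps both types precise
        segments_to_insert.append(content_str)
    return segments_to_insert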

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from core.tools.utils.web_reader_tool import get_image_upload_file_ids
@@ -71,6 +71,8 @@ def clean_dataset_task(
                     image_upload_file_ids = get_image_upload_file_ids(segment.content)
                     for upload_file_id in image_upload_file_ids:
                         image_file = db.session.query(UploadFile).filter(UploadFile.id == upload_file_id).first()
+                        if image_file is None:
+                            continue
                         try:
                             storage.delete(image_file.key)
                         except Exception:
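
The added None check is what makes this hunk pass strict mypy: SQLAlchemy's Query.first() is typed as returning an Optional, so `image_file.key` is rejected until None has been ruled out. A self-contained sketch with a stand-in for first(); the names are illustrative:

from typing import Optional

class UploadFile:
    def __init__(self, key: str) -> None:
        self.key = key

def first(files: list[UploadFile]) -> Optional[UploadFile]:
    # Stand-in for Query.first(), which returns None when no row matches.
    return files[0] if files else None

image_file = first([])
# image_file.key  ->  error: Item "None" has no attribute "key"
if image_file is None:
    pass  # skip the missing file, as the task now does
else:
    print(image_file.key)  # narrowed to UploadFile on this branch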

@@ -3,7 +3,7 @@ import time
from typing import Optional
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from core.tools.utils.web_reader_tool import get_image_upload_file_ids
@@ -44,6 +44,8 @@ def clean_document_task(document_id: str, dataset_id: str, doc_form: str, file_i
             image_upload_file_ids = get_image_upload_file_ids(segment.content)
             for upload_file_id in image_upload_file_ids:
                 image_file = db.session.query(UploadFile).filter(UploadFile.id == upload_file_id).first()
+                if image_file is None:
+                    continue
                 try:
                     storage.delete(image_file.key)
                 except Exception:

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db

@@ -4,7 +4,7 @@ import time
from typing import Optional
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from werkzeug.exceptions import NotFound
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from core.rag.models.document import Document

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
from extensions.ext_database import db

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from werkzeug.exceptions import NotFound
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory

@@ -3,7 +3,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from werkzeug.exceptions import NotFound
from core.indexing_runner import DocumentIsPausedError, IndexingRunner

@@ -3,7 +3,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from configs import dify_config
from core.indexing_runner import DocumentIsPausedError, IndexingRunner

@@ -3,7 +3,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from werkzeug.exceptions import NotFound
from core.indexing_runner import DocumentIsPausedError, IndexingRunner

@@ -3,7 +3,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from configs import dify_config
from core.indexing_runner import DocumentIsPausedError, IndexingRunner
@@ -26,6 +26,8 @@ def duplicate_document_indexing_task(dataset_id: str, document_ids: list):
     start_at = time.perf_counter()
 
     dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
+    if dataset is None:
+        raise ValueError("Dataset not found")
 
     # check document limit
     features = FeatureService.get_features(dataset.tenant_id)
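
Raising early does the same narrowing work as the guards elsewhere in this commit: after the `raise`, mypy treats `dataset` as Dataset rather than Optional[Dataset], so the `dataset.tenant_id` access below it is accepted. Roughly, with a stub Dataset for illustration:

from typing import Optional

class Dataset:
    tenant_id: str = "tenant-1"

def tenant_of(dataset: Optional[Dataset]) -> str:
    if dataset is None:
        raise ValueError("Dataset not found")
    return dataset.tenant_id  # dataset is narrowed to Dataset here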

@@ -3,7 +3,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from werkzeug.exceptions import NotFound
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from flask import render_template
from extensions.ext_mail import mail

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from flask import render_template
from configs import dify_config

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from flask import render_template
from extensions.ext_mail import mail

@@ -1,7 +1,7 @@
import json
import logging
-from celery import shared_task
+from celery import shared_task  # type: ignore
from flask import current_app
from core.ops.entities.config_entity import OPS_FILE_PATH, OPS_TRACE_FAILED_KEY

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from werkzeug.exceptions import NotFound
from core.indexing_runner import DocumentIsPausedError, IndexingRunner

@@ -3,7 +3,7 @@ import time
from collections.abc import Callable
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from sqlalchemy import delete
from sqlalchemy.exc import SQLAlchemyError

@@ -2,7 +2,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from werkzeug.exceptions import NotFound
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory

@@ -3,7 +3,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from core.indexing_runner import IndexingRunner
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
@@ -22,10 +22,13 @@ def retry_document_indexing_task(dataset_id: str, document_ids: list[str]):
     Usage: retry_document_indexing_task.delay(dataset_id, document_id)
     """
-    documents = []
+    documents: list[Document] = []
     start_at = time.perf_counter()
 
     dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
+    if not dataset:
+        raise ValueError("Dataset not found")
+
     for document_id in document_ids:
         retry_indexing_cache_key = "document_{}_is_retried".format(document_id)
         # check document limit
@@ -55,29 +58,31 @@ def retry_document_indexing_task(dataset_id: str, document_ids: list[str]):
         document = (
             db.session.query(Document).filter(Document.id == document_id, Document.dataset_id == dataset_id).first()
         )
+        if not document:
+            logging.info(click.style("Document not found: {}".format(document_id), fg="yellow"))
+            return
 
         try:
-            if document:
-                # clean old data
-                index_processor = IndexProcessorFactory(document.doc_form).init_index_processor()
+            # clean old data
+            index_processor = IndexProcessorFactory(document.doc_form).init_index_processor()
 
-                segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
-                if segments:
-                    index_node_ids = [segment.index_node_id for segment in segments]
-                    # delete from vector index
-                    index_processor.clean(dataset, index_node_ids)
+            segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
+            if segments:
+                index_node_ids = [segment.index_node_id for segment in segments]
+                # delete from vector index
+                index_processor.clean(dataset, index_node_ids)
 
-                    for segment in segments:
-                        db.session.delete(segment)
-                    db.session.commit()
+                for segment in segments:
+                    db.session.delete(segment)
+                db.session.commit()
 
-                document.indexing_status = "parsing"
-                document.processing_started_at = datetime.datetime.utcnow()
-                db.session.add(document)
-                db.session.commit()
+            document.indexing_status = "parsing"
+            document.processing_started_at = datetime.datetime.utcnow()
+            db.session.add(document)
+            db.session.commit()
 
-                indexing_runner = IndexingRunner()
-                indexing_runner.run([document])
-                redis_client.delete(retry_indexing_cache_key)
+            indexing_runner = IndexingRunner()
+            indexing_runner.run([document])
+            redis_client.delete(retry_indexing_cache_key)
         except Exception as ex:
             document.indexing_status = "error"
             document.error = str(ex)
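
The larger rewrite in this hunk swaps the `try`-wrapped `if document:` for an early-return guard. Beyond the dedent, the old shape was a real type-checking problem: mypy considers the `except` handler reachable whether or not the `if` ran, so `document` is still Optional there and the `document.indexing_status = "error"` assignment is flagged. A compressed before/after sketch with stub types, illustrative only:

from typing import Optional

class Document:
    indexing_status: str = "waiting"

def run_index(document: Document) -> None:
    raise RuntimeError("indexing failed")

def before(document: Optional[Document]) -> None:
    try:
        if document:
            run_index(document)
    except Exception:
        # error: Item "None" of "Optional[Document]" has no attribute "indexing_status"
        document.indexing_status = "error"

def after(document: Optional[Document]) -> None:
    if not document:
        return
    try:
        run_index(document)
    except Exception:
        document.indexing_status = "error"  # narrowed: document is Document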

@@ -3,7 +3,7 @@ import logging
import time
import click
-from celery import shared_task
+from celery import shared_task  # type: ignore
from core.indexing_runner import IndexingRunner
from core.rag.index_processor.index_processor_factory import IndexProcessorFactory
@@ -25,6 +25,8 @@ def sync_website_document_indexing_task(dataset_id: str, document_id: str):
     start_at = time.perf_counter()
 
     dataset = db.session.query(Dataset).filter(Dataset.id == dataset_id).first()
+    if dataset is None:
+        raise ValueError("Dataset not found")
     sync_indexing_cache_key = "document_{}_is_sync".format(document_id)
     # check document limit
@@ -52,29 +54,31 @@ def sync_website_document_indexing_task(dataset_id: str, document_id: str):
     logging.info(click.style("Start sync website document: {}".format(document_id), fg="green"))
 
     document = db.session.query(Document).filter(Document.id == document_id, Document.dataset_id == dataset_id).first()
+    if not document:
+        logging.info(click.style("Document not found: {}".format(document_id), fg="yellow"))
+        return
 
     try:
-        if document:
-            # clean old data
-            index_processor = IndexProcessorFactory(document.doc_form).init_index_processor()
+        # clean old data
+        index_processor = IndexProcessorFactory(document.doc_form).init_index_processor()
 
-            segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
-            if segments:
-                index_node_ids = [segment.index_node_id for segment in segments]
-                # delete from vector index
-                index_processor.clean(dataset, index_node_ids)
+        segments = db.session.query(DocumentSegment).filter(DocumentSegment.document_id == document_id).all()
+        if segments:
+            index_node_ids = [segment.index_node_id for segment in segments]
+            # delete from vector index
+            index_processor.clean(dataset, index_node_ids)
 
-                for segment in segments:
-                    db.session.delete(segment)
-                db.session.commit()
+            for segment in segments:
+                db.session.delete(segment)
+            db.session.commit()
 
-            document.indexing_status = "parsing"
-            document.processing_started_at = datetime.datetime.utcnow()
-            db.session.add(document)
-            db.session.commit()
+        document.indexing_status = "parsing"
+        document.processing_started_at = datetime.datetime.utcnow()
+        db.session.add(document)
+        db.session.commit()
 
-            indexing_runner = IndexingRunner()
-            indexing_runner.run([document])
-            redis_client.delete(sync_indexing_cache_key)
+        indexing_runner = IndexingRunner()
+        indexing_runner.run([document])
+        redis_client.delete(sync_indexing_cache_key)
     except Exception as ex:
         document.indexing_status = "error"
         document.error = str(ex)