Mirror of https://github.com/langgenius/dify.git (synced 2025-12-25 10:01:31 -05:00)
make logging not use f-str, change others to f-str (#22882)
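The whole change applies one pattern: logging calls stop pre-building their messages with f-strings and instead pass the values as lazy %-style arguments, while ordinary (non-logging) string building moves from str.format() to f-strings. A minimal sketch of the two conventions, with illustrative names that are not taken from this commit:

import logging

logger = logging.getLogger(__name__)


def handle_task(task_id: str, item_count: int) -> str:
    # Logging: hand the logger the constant template plus the values. The
    # string is only formatted if the record is actually emitted, so a
    # disabled DEBUG level costs almost nothing, and log aggregators can
    # group records by the unchanged template.
    logger.debug("processing task %s (%s items)", task_id, item_count)

    # Everything else: plain string building prefers f-strings.
    return f"task {task_id} finished with {item_count} items"

With an f-string inside the logger call the interpolation runs even when the level is disabled; the lazy form defers it, which is what linters such as pylint's logging-fstring-interpolation check warn about.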
@@ -600,5 +600,5 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
 if len(e.args) > 0 and e.args[0] == "I/O operation on closed file.": # ignore this error
 raise GenerateTaskStoppedError()
 else:
-logger.exception(f"Failed to process generate task pipeline, conversation_id: {conversation.id}")
+logger.exception("Failed to process generate task pipeline, conversation_id: %s", conversation.id)
 raise e
@@ -271,7 +271,7 @@ class AdvancedChatAppGenerateTaskPipeline:
 start_listener_time = time.time()
 yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id)
 except Exception:
-logger.exception(f"Failed to listen audio message, task_id: {task_id}")
+logger.exception("Failed to listen audio message, task_id: %s", task_id)
 break
 if tts_publisher:
 yield MessageAudioEndStreamResponse(audio="", task_id=task_id)
@@ -78,7 +78,7 @@ class MessageBasedAppGenerator(BaseAppGenerator):
 if len(e.args) > 0 and e.args[0] == "I/O operation on closed file.": # ignore this error
 raise GenerateTaskStoppedError()
 else:
-logger.exception(f"Failed to handle response, conversation_id: {conversation.id}")
+logger.exception("Failed to handle response, conversation_id: %s", conversation.id)
 raise e

 def _get_app_model_config(self, app_model: App, conversation: Optional[Conversation] = None) -> AppModelConfig:
@@ -483,7 +483,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
 try:
 runner.run()
 except GenerateTaskStoppedError as e:
-logger.warning(f"Task stopped: {str(e)}")
+logger.warning("Task stopped: %s", str(e))
 pass
 except InvokeAuthorizationError:
 queue_manager.publish_error(
@@ -540,6 +540,6 @@ class WorkflowAppGenerator(BaseAppGenerator):
 raise GenerateTaskStoppedError()
 else:
 logger.exception(
-f"Fails to process generate task pipeline, task_id: {application_generate_entity.task_id}"
+"Fails to process generate task pipeline, task_id: %s", application_generate_entity.task_id
 )
 raise e
@@ -246,7 +246,7 @@ class WorkflowAppGenerateTaskPipeline:
 else:
 yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id)
 except Exception:
-logger.exception(f"Fails to get audio trunk, task_id: {task_id}")
+logger.exception("Fails to get audio trunk, task_id: %s", task_id)
 break
 if tts_publisher:
 yield MessageAudioEndStreamResponse(audio="", task_id=task_id)
@@ -83,7 +83,7 @@ class AnnotationReplyFeature:

 return annotation
 except Exception as e:
-logger.warning(f"Query annotation failed, exception: {str(e)}.")
+logger.warning("Query annotation failed, exception: %s.", str(e))
 return None

 return None
@@ -97,7 +97,7 @@ class MessageCycleManager:
 conversation.name = name
 except Exception as e:
 if dify_config.DEBUG:
-logging.exception(f"generate conversation name failed, conversation_id: {conversation_id}")
+logging.exception("generate conversation name failed, conversation_id: %s", conversation_id)
 pass

 db.session.merge(conversation)
@@ -900,7 +900,7 @@ class ProviderConfiguration(BaseModel):
 credentials=copy_credentials,
 )
 except Exception as ex:
-logger.warning(f"get custom model schema failed, {ex}")
+logger.warning("get custom model schema failed, %s", ex)
 continue

 if not custom_model_schema:
@@ -1009,7 +1009,7 @@ class ProviderConfiguration(BaseModel):
 credentials=model_configuration.credentials,
 )
 except Exception as ex:
-logger.warning(f"get custom model schema failed, {ex}")
+logger.warning("get custom model schema failed, %s", ex)
 continue

 if not custom_model_schema:
@@ -22,7 +22,7 @@ class APIBasedExtensionRequestor:
 :param params: the request params
 :return: the response json
 """
-headers = {"Content-Type": "application/json", "Authorization": "Bearer {}".format(self.api_key)}
+headers = {"Content-Type": "application/json", "Authorization": f"Bearer {self.api_key}"}

 url = self.api_endpoint
@@ -49,8 +49,6 @@ class APIBasedExtensionRequestor:
 raise ValueError("request connection error")

 if response.status_code != 200:
-raise ValueError(
-"request error, status_code: {}, content: {}".format(response.status_code, response.text[:100])
-)
+raise ValueError(f"request error, status_code: {response.status_code}, content: {response.text[:100]}")

 return cast(dict, response.json())
@@ -66,7 +66,7 @@ class Extensible:

 # Check for extension module file
 if (extension_name + ".py") not in file_names:
-logging.warning(f"Missing {extension_name}.py file in {subdir_path}, Skip.")
+logging.warning("Missing %s.py file in %s, Skip.", extension_name, subdir_path)
 continue

 # Check for builtin flag and position
@@ -95,7 +95,7 @@ class Extensible:
 break

 if not extension_class:
-logging.warning(f"Missing subclass of {cls.__name__} in {module_name}, Skip.")
+logging.warning("Missing subclass of %s in %s, Skip.", cls.__name__, module_name)
 continue

 # Load schema if not builtin
@@ -103,7 +103,7 @@ class Extensible:
 if not builtin:
 json_path = os.path.join(subdir_path, "schema.json")
 if not os.path.exists(json_path):
-logging.warning(f"Missing schema.json file in {subdir_path}, Skip.")
+logging.warning("Missing schema.json file in %s, Skip.", subdir_path)
 continue

 with open(json_path, encoding="utf-8") as f:
@@ -49,7 +49,7 @@ class ApiExternalDataTool(ExternalDataTool):
 """
 # get params from config
 if not self.config:
-raise ValueError("config is required, config: {}".format(self.config))
+raise ValueError(f"config is required, config: {self.config}")
 api_based_extension_id = self.config.get("api_based_extension_id")
 assert api_based_extension_id is not None, "api_based_extension_id is required"
@@ -74,7 +74,7 @@ class ApiExternalDataTool(ExternalDataTool):
 # request api
 requestor = APIBasedExtensionRequestor(api_endpoint=api_based_extension.api_endpoint, api_key=api_key)
 except Exception as e:
-raise ValueError("[External data tool] API query failed, variable: {}, error: {}".format(self.variable, e))
+raise ValueError(f"[External data tool] API query failed, variable: {self.variable}, error: {e}")

 response_json = requestor.request(
 point=APIBasedExtensionPoint.APP_EXTERNAL_DATA_TOOL_QUERY,
@@ -90,7 +90,7 @@ class ApiExternalDataTool(ExternalDataTool):

 if not isinstance(response_json["result"], str):
 raise ValueError(
-"[External data tool] API query failed, variable: {}, error: result is not string".format(self.variable)
+f"[External data tool] API query failed, variable: {self.variable}, error: result is not string"
 )

 return response_json["result"]
@@ -55,7 +55,7 @@ def check_moderation(tenant_id: str, model_config: ModelConfigWithCredentialsEnt
 if moderation_result is True:
 return True
 except Exception:
-logger.exception(f"Fails to check moderation, provider_name: {provider_name}")
+logger.exception("Fails to check moderation, provider_name: %s", provider_name)
 raise InvokeBadRequestError("Rate limit exceeded, please try again later.")

 return False
@@ -30,7 +30,7 @@ def import_module_from_source(*, module_name: str, py_file_path: AnyStr, use_laz
 spec.loader.exec_module(module)
 return module
 except Exception as e:
-logging.exception(f"Failed to load module {module_name} from script file '{py_file_path!r}'")
+logging.exception("Failed to load module %s from script file '%s'", module_name, repr(py_file_path))
 raise e
@@ -73,10 +73,12 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs):
 if response.status_code not in STATUS_FORCELIST:
 return response
 else:
-logging.warning(f"Received status code {response.status_code} for URL {url} which is in the force list")
+logging.warning(
+"Received status code %s for URL %s which is in the force list", response.status_code, url
+)

 except httpx.RequestError as e:
-logging.warning(f"Request to URL {url} failed on attempt {retries + 1}: {e}")
+logging.warning("Request to URL %s failed on attempt %s: %s", url, retries + 1, e)
 if max_retries == 0:
 raise
@@ -84,14 +84,14 @@ class IndexingRunner:
 documents=documents,
 )
 except DocumentIsPausedError:
-raise DocumentIsPausedError("Document paused, document id: {}".format(dataset_document.id))
+raise DocumentIsPausedError(f"Document paused, document id: {dataset_document.id}")
 except ProviderTokenNotInitError as e:
 dataset_document.indexing_status = "error"
 dataset_document.error = str(e.description)
 dataset_document.stopped_at = datetime.datetime.now(datetime.UTC).replace(tzinfo=None)
 db.session.commit()
 except ObjectDeletedError:
-logging.warning("Document deleted, document id: {}".format(dataset_document.id))
+logging.warning("Document deleted, document id: %s", dataset_document.id)
 except Exception as e:
 logging.exception("consume document failed")
 dataset_document.indexing_status = "error"
@@ -147,7 +147,7 @@ class IndexingRunner:
 index_processor=index_processor, dataset=dataset, dataset_document=dataset_document, documents=documents
 )
 except DocumentIsPausedError:
-raise DocumentIsPausedError("Document paused, document id: {}".format(dataset_document.id))
+raise DocumentIsPausedError(f"Document paused, document id: {dataset_document.id}")
 except ProviderTokenNotInitError as e:
 dataset_document.indexing_status = "error"
 dataset_document.error = str(e.description)
@@ -222,7 +222,7 @@ class IndexingRunner:
 index_processor=index_processor, dataset=dataset, dataset_document=dataset_document, documents=documents
 )
 except DocumentIsPausedError:
-raise DocumentIsPausedError("Document paused, document id: {}".format(dataset_document.id))
+raise DocumentIsPausedError(f"Document paused, document id: {dataset_document.id}")
 except ProviderTokenNotInitError as e:
 dataset_document.indexing_status = "error"
 dataset_document.error = str(e.description)
@@ -324,7 +324,8 @@ class IndexingRunner:
 except Exception:
 logging.exception(
 "Delete image_files failed while indexing_estimate, \
-image_upload_file_is: {}".format(upload_file_id)
+image_upload_file_is: %s",
+upload_file_id,
 )
 db.session.delete(image_file)
@@ -649,7 +650,7 @@ class IndexingRunner:

 @staticmethod
 def _check_document_paused_status(document_id: str):
-indexing_cache_key = "document_{}_is_paused".format(document_id)
+indexing_cache_key = f"document_{document_id}_is_paused"
 result = redis_client.get(indexing_cache_key)
 if result:
 raise DocumentIsPausedError()
@@ -167,7 +167,7 @@ class LLMGenerator:
 error = str(e)
 error_step = "generate rule config"
 except Exception as e:
-logging.exception(f"Failed to generate rule config, model: {model_config.get('name')}")
+logging.exception("Failed to generate rule config, model: %s", model_config.get("name"))
 rule_config["error"] = str(e)

 rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""
@@ -264,7 +264,7 @@ class LLMGenerator:
 error_step = "generate conversation opener"

 except Exception as e:
-logging.exception(f"Failed to generate rule config, model: {model_config.get('name')}")
+logging.exception("Failed to generate rule config, model: %s", model_config.get("name"))
 rule_config["error"] = str(e)

 rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else ""
@@ -314,7 +314,7 @@ class LLMGenerator:
 return {"code": "", "language": code_language, "error": f"Failed to generate code. Error: {error}"}
 except Exception as e:
 logging.exception(
-f"Failed to invoke LLM model, model: {model_config.get('name')}, language: {code_language}"
+"Failed to invoke LLM model, model: %s, language: %s", model_config.get("name"), code_language
 )
 return {"code": "", "language": code_language, "error": f"An unexpected error occurred: {str(e)}"}
@@ -386,5 +386,5 @@ class LLMGenerator:
 error = str(e)
 return {"output": "", "error": f"Failed to generate JSON Schema. Error: {error}"}
 except Exception as e:
-logging.exception(f"Failed to invoke LLM model, model: {model_config.get('name')}")
+logging.exception("Failed to invoke LLM model, model: %s", model_config.get("name"))
 return {"output": "", "error": f"An unexpected error occurred: {str(e)}"}
@@ -88,7 +88,7 @@ class SSETransport:
 status_queue: Queue to put status updates.
 """
 endpoint_url = urljoin(self.url, sse_data)
-logger.info(f"Received endpoint URL: {endpoint_url}")
+logger.info("Received endpoint URL: %s", endpoint_url)

 if not self._validate_endpoint_url(endpoint_url):
 error_msg = f"Endpoint origin does not match connection origin: {endpoint_url}"
@@ -107,7 +107,7 @@ class SSETransport:
 """
 try:
 message = types.JSONRPCMessage.model_validate_json(sse_data)
-logger.debug(f"Received server message: {message}")
+logger.debug("Received server message: %s", message)
 session_message = SessionMessage(message)
 read_queue.put(session_message)
 except Exception as exc:
@@ -128,7 +128,7 @@ class SSETransport:
 case "message":
 self._handle_message_event(sse.data, read_queue)
 case _:
-logger.warning(f"Unknown SSE event: {sse.event}")
+logger.warning("Unknown SSE event: %s", sse.event)

 def sse_reader(self, event_source, read_queue: ReadQueue, status_queue: StatusQueue) -> None:
 """Read and process SSE events.
@@ -142,7 +142,7 @@ class SSETransport:
 for sse in event_source.iter_sse():
 self._handle_sse_event(sse, read_queue, status_queue)
 except httpx.ReadError as exc:
-logger.debug(f"SSE reader shutting down normally: {exc}")
+logger.debug("SSE reader shutting down normally: %s", exc)
 except Exception as exc:
 read_queue.put(exc)
 finally:
@@ -165,7 +165,7 @@ class SSETransport:
 ),
 )
 response.raise_for_status()
-logger.debug(f"Client message sent successfully: {response.status_code}")
+logger.debug("Client message sent successfully: %s", response.status_code)

 def post_writer(self, client: httpx.Client, endpoint_url: str, write_queue: WriteQueue) -> None:
 """Handle writing messages to the server.
@@ -190,7 +190,7 @@ class SSETransport:
 except queue.Empty:
 continue
 except httpx.ReadError as exc:
-logger.debug(f"Post writer shutting down normally: {exc}")
+logger.debug("Post writer shutting down normally: %s", exc)
 except Exception as exc:
 logger.exception("Error writing messages")
 write_queue.put(exc)
@@ -326,7 +326,7 @@ def send_message(http_client: httpx.Client, endpoint_url: str, session_message:
 ),
 )
 response.raise_for_status()
-logger.debug(f"Client message sent successfully: {response.status_code}")
+logger.debug("Client message sent successfully: %s", response.status_code)
 except Exception as exc:
 logger.exception("Error sending message")
 raise
@@ -349,13 +349,13 @@ def read_messages(
 if sse.event == "message":
 try:
 message = types.JSONRPCMessage.model_validate_json(sse.data)
-logger.debug(f"Received server message: {message}")
+logger.debug("Received server message: %s", message)
 yield SessionMessage(message)
 except Exception as exc:
 logger.exception("Error parsing server message")
 yield exc
 else:
-logger.warning(f"Unknown SSE event: {sse.event}")
+logger.warning("Unknown SSE event: %s", sse.event)
 except Exception as exc:
 logger.exception("Error reading SSE messages")
 yield exc
@@ -129,7 +129,7 @@ class StreamableHTTPTransport:
 new_session_id = response.headers.get(MCP_SESSION_ID)
 if new_session_id:
 self.session_id = new_session_id
-logger.info(f"Received session ID: {self.session_id}")
+logger.info("Received session ID: %s", self.session_id)

 def _handle_sse_event(
 self,
@@ -142,7 +142,7 @@ class StreamableHTTPTransport:
 if sse.event == "message":
 try:
 message = JSONRPCMessage.model_validate_json(sse.data)
-logger.debug(f"SSE message: {message}")
+logger.debug("SSE message: %s", message)

 # If this is a response and we have original_request_id, replace it
 if original_request_id is not None and isinstance(message.root, JSONRPCResponse | JSONRPCError):
@@ -168,7 +168,7 @@ class StreamableHTTPTransport:
 logger.debug("Received ping event")
 return False
 else:
-logger.warning(f"Unknown SSE event: {sse.event}")
+logger.warning("Unknown SSE event: %s", sse.event)
 return False

 def handle_get_stream(
@@ -197,7 +197,7 @@ class StreamableHTTPTransport:
 self._handle_sse_event(sse, server_to_client_queue)

 except Exception as exc:
-logger.debug(f"GET stream error (non-fatal): {exc}")
+logger.debug("GET stream error (non-fatal): %s", exc)

 def _handle_resumption_request(self, ctx: RequestContext) -> None:
 """Handle a resumption request using GET with SSE."""
@@ -352,7 +352,7 @@ class StreamableHTTPTransport:
 # Check if this is a resumption request
 is_resumption = bool(metadata and metadata.resumption_token)

-logger.debug(f"Sending client message: {message}")
+logger.debug("Sending client message: %s", message)

 # Handle initialized notification
 if self._is_initialized_notification(message):
@@ -389,9 +389,9 @@ class StreamableHTTPTransport:
 if response.status_code == 405:
 logger.debug("Server does not allow session termination")
 elif response.status_code != 200:
-logger.warning(f"Session termination failed: {response.status_code}")
+logger.warning("Session termination failed: %s", response.status_code)
 except Exception as exc:
-logger.warning(f"Session termination failed: {exc}")
+logger.warning("Session termination failed: %s", exc)

 def get_session_id(self) -> str | None:
 """Get the current session ID."""
@@ -75,7 +75,7 @@ class MCPClient:
 self.connect_server(client_factory, method_name)
 else:
 try:
-logger.debug(f"Not supported method {method_name} found in URL path, trying default 'mcp' method.")
+logger.debug("Not supported method %s found in URL path, trying default 'mcp' method.", method_name)
 self.connect_server(sse_client, "sse")
 except MCPConnectionError:
 logger.debug("MCP connection failed with 'sse', falling back to 'mcp' method.")
@@ -368,7 +368,7 @@ class BaseSession(
 self._handle_incoming(notification)
 except Exception as e:
 # For other validation errors, log and continue
-logging.warning(f"Failed to validate notification: {e}. Message was: {message.message.root}")
+logging.warning("Failed to validate notification: %s. Message was: %s", e, message.message.root)
 else: # Response or error
 response_queue = self._response_streams.get(message.message.root.id)
 if response_queue is not None:
@@ -535,9 +535,19 @@ class LBModelManager:

 if dify_config.DEBUG:
 logger.info(
-f"Model LB\nid: {config.id}\nname:{config.name}\n"
-f"tenant_id: {self._tenant_id}\nprovider: {self._provider}\n"
-f"model_type: {self._model_type.value}\nmodel: {self._model}"
+"""Model LB
+id: %s
+name:%s
+tenant_id: %s
+provider: %s
+model_type: %s
+model: %s""",
+config.id,
+config.name,
+self._tenant_id,
+self._provider,
+self._model_type.value,
+self._model,
 )

 return config
@@ -440,7 +440,9 @@ class LargeLanguageModel(AIModel):
|
||||
if callback.raise_error:
|
||||
raise e
|
||||
else:
|
||||
logger.warning(f"Callback {callback.__class__.__name__} on_before_invoke failed with error {e}")
|
||||
logger.warning(
|
||||
"Callback %s on_before_invoke failed with error %s", callback.__class__.__name__, e
|
||||
)
|
||||
|
||||
def _trigger_new_chunk_callbacks(
|
||||
self,
|
||||
@@ -487,7 +489,7 @@ class LargeLanguageModel(AIModel):
|
||||
if callback.raise_error:
|
||||
raise e
|
||||
else:
|
||||
logger.warning(f"Callback {callback.__class__.__name__} on_new_chunk failed with error {e}")
|
||||
logger.warning("Callback %s on_new_chunk failed with error %s", callback.__class__.__name__, e)
|
||||
|
||||
def _trigger_after_invoke_callbacks(
|
||||
self,
|
||||
@@ -535,7 +537,9 @@ class LargeLanguageModel(AIModel):
|
||||
if callback.raise_error:
|
||||
raise e
|
||||
else:
|
||||
logger.warning(f"Callback {callback.__class__.__name__} on_after_invoke failed with error {e}")
|
||||
logger.warning(
|
||||
"Callback %s on_after_invoke failed with error %s", callback.__class__.__name__, e
|
||||
)
|
||||
|
||||
def _trigger_invoke_error_callbacks(
|
||||
self,
|
||||
@@ -583,4 +587,6 @@ class LargeLanguageModel(AIModel):
|
||||
if callback.raise_error:
|
||||
raise e
|
||||
else:
|
||||
logger.warning(f"Callback {callback.__class__.__name__} on_invoke_error failed with error {e}")
|
||||
logger.warning(
|
||||
"Callback %s on_invoke_error failed with error %s", callback.__class__.__name__, e
|
||||
)
|
||||
|
||||
@@ -136,6 +136,6 @@ class OutputModeration(BaseModel):
|
||||
result: ModerationOutputsResult = moderation_factory.moderation_for_outputs(moderation_buffer)
|
||||
return result
|
||||
except Exception as e:
|
||||
logger.exception(f"Moderation Output error, app_id: {app_id}")
|
||||
logger.exception("Moderation Output error, app_id: %s", app_id)
|
||||
|
||||
return None
|
||||
|
||||
@@ -97,7 +97,7 @@ class AliyunDataTrace(BaseTraceInstance):
|
||||
try:
|
||||
return self.trace_client.get_project_url()
|
||||
except Exception as e:
|
||||
logger.info(f"Aliyun get run url failed: {str(e)}", exc_info=True)
|
||||
logger.info("Aliyun get run url failed: %s", str(e), exc_info=True)
|
||||
raise ValueError(f"Aliyun get run url failed: {str(e)}")
|
||||
|
||||
def workflow_trace(self, trace_info: WorkflowTraceInfo):
|
||||
@@ -286,7 +286,7 @@ class AliyunDataTrace(BaseTraceInstance):
|
||||
node_span = self.build_workflow_task_span(trace_id, workflow_span_id, trace_info, node_execution)
|
||||
return node_span
|
||||
except Exception as e:
|
||||
logging.debug(f"Error occurred in build_workflow_node_span: {e}", exc_info=True)
|
||||
logging.debug("Error occurred in build_workflow_node_span: %s", e, exc_info=True)
|
||||
return None
|
||||
|
||||
def get_workflow_node_status(self, node_execution: WorkflowNodeExecution) -> Status:
|
||||
|
||||
@@ -69,10 +69,10 @@ class TraceClient:
|
||||
if response.status_code == 405:
|
||||
return True
|
||||
else:
|
||||
logger.debug(f"AliyunTrace API check failed: Unexpected status code: {response.status_code}")
|
||||
logger.debug("AliyunTrace API check failed: Unexpected status code: %s", response.status_code)
|
||||
return False
|
||||
except requests.exceptions.RequestException as e:
|
||||
logger.debug(f"AliyunTrace API check failed: {str(e)}")
|
||||
logger.debug("AliyunTrace API check failed: %s", str(e))
|
||||
raise ValueError(f"AliyunTrace API check failed: {str(e)}")
|
||||
|
||||
def get_project_url(self):
|
||||
@@ -109,7 +109,7 @@ class TraceClient:
|
||||
try:
|
||||
self.exporter.export(spans_to_export)
|
||||
except Exception as e:
|
||||
logger.debug(f"Error exporting spans: {e}")
|
||||
logger.debug("Error exporting spans: %s", e)
|
||||
|
||||
def shutdown(self):
|
||||
with self.condition:
|
||||
|
||||
@@ -77,10 +77,10 @@ def setup_tracer(arize_phoenix_config: ArizeConfig | PhoenixConfig) -> tuple[tra
|
||||
|
||||
# Create a named tracer instead of setting the global provider
|
||||
tracer_name = f"arize_phoenix_tracer_{arize_phoenix_config.project}"
|
||||
logger.info(f"[Arize/Phoenix] Created tracer with name: {tracer_name}")
|
||||
logger.info("[Arize/Phoenix] Created tracer with name: %s", tracer_name)
|
||||
return cast(trace_sdk.Tracer, provider.get_tracer(tracer_name)), processor
|
||||
except Exception as e:
|
||||
logger.error(f"[Arize/Phoenix] Failed to setup the tracer: {str(e)}", exc_info=True)
|
||||
logger.error("[Arize/Phoenix] Failed to setup the tracer: %s", str(e), exc_info=True)
|
||||
raise
|
||||
|
||||
|
||||
@@ -120,7 +120,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance):
|
||||
self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001")
|
||||
|
||||
def trace(self, trace_info: BaseTraceInfo):
|
||||
logger.info(f"[Arize/Phoenix] Trace: {trace_info}")
|
||||
logger.info("[Arize/Phoenix] Trace: %s", trace_info)
|
||||
try:
|
||||
if isinstance(trace_info, WorkflowTraceInfo):
|
||||
self.workflow_trace(trace_info)
|
||||
@@ -138,7 +138,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance):
|
||||
self.generate_name_trace(trace_info)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[Arize/Phoenix] Error in the trace: {str(e)}", exc_info=True)
|
||||
logger.error("[Arize/Phoenix] Error in the trace: %s", str(e), exc_info=True)
|
||||
raise
|
||||
|
||||
def workflow_trace(self, trace_info: WorkflowTraceInfo):
|
||||
@@ -570,7 +570,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance):
|
||||
|
||||
trace_id = uuid_to_trace_id(trace_info.message_id)
|
||||
tool_span_id = RandomIdGenerator().generate_span_id()
|
||||
logger.info(f"[Arize/Phoenix] Creating tool trace with trace_id: {trace_id}, span_id: {tool_span_id}")
|
||||
logger.info("[Arize/Phoenix] Creating tool trace with trace_id: %s, span_id: %s", trace_id, tool_span_id)
|
||||
|
||||
# Create span context with the same trace_id as the parent
|
||||
# todo: Create with the appropriate parent span context, so that the tool span is
|
||||
@@ -673,7 +673,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance):
|
||||
span.set_attribute("test", "true")
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.info(f"[Arize/Phoenix] API check failed: {str(e)}", exc_info=True)
|
||||
logger.info("[Arize/Phoenix] API check failed: %s", str(e), exc_info=True)
|
||||
raise ValueError(f"[Arize/Phoenix] API check failed: {str(e)}")
|
||||
|
||||
def get_project_url(self):
|
||||
@@ -683,7 +683,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance):
|
||||
else:
|
||||
return f"{self.arize_phoenix_config.endpoint}/projects/"
|
||||
except Exception as e:
|
||||
logger.info(f"[Arize/Phoenix] Get run url failed: {str(e)}", exc_info=True)
|
||||
logger.info("[Arize/Phoenix] Get run url failed: %s", str(e), exc_info=True)
|
||||
raise ValueError(f"[Arize/Phoenix] Get run url failed: {str(e)}")
|
||||
|
||||
def _get_workflow_nodes(self, workflow_run_id: str):
|
||||
|
||||
@@ -440,7 +440,7 @@ class LangFuseDataTrace(BaseTraceInstance):
|
||||
try:
|
||||
return self.langfuse_client.auth_check()
|
||||
except Exception as e:
|
||||
logger.debug(f"LangFuse API check failed: {str(e)}")
|
||||
logger.debug("LangFuse API check failed: %s", str(e))
|
||||
raise ValueError(f"LangFuse API check failed: {str(e)}")
|
||||
|
||||
def get_project_key(self):
|
||||
@@ -448,5 +448,5 @@ class LangFuseDataTrace(BaseTraceInstance):
|
||||
projects = self.langfuse_client.client.projects.get()
|
||||
return projects.data[0].id
|
||||
except Exception as e:
|
||||
logger.debug(f"LangFuse get project key failed: {str(e)}")
|
||||
logger.debug("LangFuse get project key failed: %s", str(e))
|
||||
raise ValueError(f"LangFuse get project key failed: {str(e)}")
|
||||
|
||||
@@ -504,7 +504,7 @@ class LangSmithDataTrace(BaseTraceInstance):
|
||||
self.langsmith_client.delete_project(project_name=random_project_name)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.debug(f"LangSmith API check failed: {str(e)}")
|
||||
logger.debug("LangSmith API check failed: %s", str(e))
|
||||
raise ValueError(f"LangSmith API check failed: {str(e)}")
|
||||
|
||||
def get_project_url(self):
|
||||
@@ -523,5 +523,5 @@ class LangSmithDataTrace(BaseTraceInstance):
|
||||
)
|
||||
return project_url.split("/r/")[0]
|
||||
except Exception as e:
|
||||
logger.debug(f"LangSmith get run url failed: {str(e)}")
|
||||
logger.debug("LangSmith get run url failed: %s", str(e))
|
||||
raise ValueError(f"LangSmith get run url failed: {str(e)}")
|
||||
|
||||
@@ -453,12 +453,12 @@ class OpikDataTrace(BaseTraceInstance):
|
||||
self.opik_client.auth_check()
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.info(f"Opik API check failed: {str(e)}", exc_info=True)
|
||||
logger.info("Opik API check failed: %s", str(e), exc_info=True)
|
||||
raise ValueError(f"Opik API check failed: {str(e)}")
|
||||
|
||||
def get_project_url(self):
|
||||
try:
|
||||
return self.opik_client.get_project_url(project_name=self.project)
|
||||
except Exception as e:
|
||||
logger.info(f"Opik get run url failed: {str(e)}", exc_info=True)
|
||||
logger.info("Opik get run url failed: %s", str(e), exc_info=True)
|
||||
raise ValueError(f"Opik get run url failed: {str(e)}")
|
||||
|
||||
@@ -287,7 +287,7 @@ class OpsTraceManager:
|
||||
# create new tracing_instance and update the cache if it absent
|
||||
tracing_instance = trace_instance(config_class(**decrypt_trace_config))
|
||||
cls.ops_trace_instances_cache[decrypt_trace_config_key] = tracing_instance
|
||||
logging.info(f"new tracing_instance for app_id: {app_id}")
|
||||
logging.info("new tracing_instance for app_id: %s", app_id)
|
||||
return tracing_instance
|
||||
|
||||
@classmethod
|
||||
@@ -843,7 +843,7 @@ class TraceQueueManager:
|
||||
trace_task.app_id = self.app_id
|
||||
trace_manager_queue.put(trace_task)
|
||||
except Exception as e:
|
||||
logging.exception(f"Error adding trace task, trace_type {trace_task.trace_type}")
|
||||
logging.exception("Error adding trace task, trace_type %s", trace_task.trace_type)
|
||||
finally:
|
||||
self.start_timer()
|
||||
|
||||
|
||||
@@ -66,11 +66,11 @@ class WeaveDataTrace(BaseTraceInstance):
|
||||
project_url = f"https://wandb.ai/{self.weave_client._project_id()}"
|
||||
return project_url
|
||||
except Exception as e:
|
||||
logger.debug(f"Weave get run url failed: {str(e)}")
|
||||
logger.debug("Weave get run url failed: %s", str(e))
|
||||
raise ValueError(f"Weave get run url failed: {str(e)}")
|
||||
|
||||
def trace(self, trace_info: BaseTraceInfo):
|
||||
logger.debug(f"Trace info: {trace_info}")
|
||||
logger.debug("Trace info: %s", trace_info)
|
||||
if isinstance(trace_info, WorkflowTraceInfo):
|
||||
self.workflow_trace(trace_info)
|
||||
if isinstance(trace_info, MessageTraceInfo):
|
||||
@@ -403,7 +403,7 @@ class WeaveDataTrace(BaseTraceInstance):
|
||||
print("Weave login successful")
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.debug(f"Weave API check failed: {str(e)}")
|
||||
logger.debug("Weave API check failed: %s", str(e))
|
||||
raise ValueError(f"Weave API check failed: {str(e)}")
|
||||
|
||||
def start_call(self, run_data: WeaveTraceModel, parent_run_id: Optional[str] = None):
|
||||
|
||||
@@ -24,7 +24,7 @@ class Jieba(BaseKeyword):
|
||||
self._config = KeywordTableConfig()
|
||||
|
||||
def create(self, texts: list[Document], **kwargs) -> BaseKeyword:
|
||||
lock_name = "keyword_indexing_lock_{}".format(self.dataset.id)
|
||||
lock_name = f"keyword_indexing_lock_{self.dataset.id}"
|
||||
with redis_client.lock(lock_name, timeout=600):
|
||||
keyword_table_handler = JiebaKeywordTableHandler()
|
||||
keyword_table = self._get_dataset_keyword_table()
|
||||
@@ -43,7 +43,7 @@ class Jieba(BaseKeyword):
|
||||
return self
|
||||
|
||||
def add_texts(self, texts: list[Document], **kwargs):
|
||||
lock_name = "keyword_indexing_lock_{}".format(self.dataset.id)
|
||||
lock_name = f"keyword_indexing_lock_{self.dataset.id}"
|
||||
with redis_client.lock(lock_name, timeout=600):
|
||||
keyword_table_handler = JiebaKeywordTableHandler()
|
||||
|
||||
@@ -76,7 +76,7 @@ class Jieba(BaseKeyword):
|
||||
return id in set.union(*keyword_table.values())
|
||||
|
||||
def delete_by_ids(self, ids: list[str]) -> None:
|
||||
lock_name = "keyword_indexing_lock_{}".format(self.dataset.id)
|
||||
lock_name = f"keyword_indexing_lock_{self.dataset.id}"
|
||||
with redis_client.lock(lock_name, timeout=600):
|
||||
keyword_table = self._get_dataset_keyword_table()
|
||||
if keyword_table is not None:
|
||||
@@ -116,7 +116,7 @@ class Jieba(BaseKeyword):
|
||||
return documents
|
||||
|
||||
def delete(self) -> None:
|
||||
lock_name = "keyword_indexing_lock_{}".format(self.dataset.id)
|
||||
lock_name = f"keyword_indexing_lock_{self.dataset.id}"
|
||||
with redis_client.lock(lock_name, timeout=600):
|
||||
dataset_keyword_table = self.dataset.dataset_keyword_table
|
||||
if dataset_keyword_table:
|
||||
|
||||
@@ -203,9 +203,9 @@ class BaiduVector(BaseVector):
|
||||
|
||||
def _create_table(self, dimension: int) -> None:
|
||||
# Try to grab distributed lock and create table
|
||||
lock_name = "vector_indexing_lock_{}".format(self._collection_name)
|
||||
lock_name = f"vector_indexing_lock_{self._collection_name}"
|
||||
with redis_client.lock(lock_name, timeout=60):
|
||||
table_exist_cache_key = "vector_indexing_{}".format(self._collection_name)
|
||||
table_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(table_exist_cache_key):
|
||||
return
|
||||
|
||||
|
||||
@@ -57,9 +57,9 @@ class ChromaVector(BaseVector):
|
||||
self.add_texts(texts, embeddings, **kwargs)
|
||||
|
||||
def create_collection(self, collection_name: str):
|
||||
lock_name = "vector_indexing_lock_{}".format(collection_name)
|
||||
lock_name = f"vector_indexing_lock_{collection_name}"
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = "vector_indexing_{}".format(self._collection_name)
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
return
|
||||
self._client.get_or_create_collection(collection_name)
|
||||
|
||||
@@ -74,9 +74,9 @@ class CouchbaseVector(BaseVector):
|
||||
self.add_texts(texts, embeddings)
|
||||
|
||||
def _create_collection(self, vector_length: int, uuid: str):
|
||||
lock_name = "vector_indexing_lock_{}".format(self._collection_name)
|
||||
lock_name = f"vector_indexing_lock_{self._collection_name}"
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = "vector_indexing_{}".format(self._collection_name)
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
return
|
||||
if self._collection_exists(self._collection_name):
|
||||
@@ -242,7 +242,7 @@ class CouchbaseVector(BaseVector):
|
||||
try:
|
||||
self._cluster.query(query, named_parameters={"doc_ids": ids}).execute()
|
||||
except Exception as e:
|
||||
logger.exception(f"Failed to delete documents, ids: {ids}")
|
||||
logger.exception("Failed to delete documents, ids: %s", ids)
|
||||
|
||||
def delete_by_document_id(self, document_id: str):
|
||||
query = f"""
|
||||
|
||||
@@ -29,7 +29,7 @@ class ElasticSearchJaVector(ElasticSearchVector):
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
logger.info(f"Collection {self._collection_name} already exists.")
|
||||
logger.info("Collection %s already exists.", self._collection_name)
|
||||
return
|
||||
|
||||
if not self._client.indices.exists(index=self._collection_name):
|
||||
|
||||
@@ -186,7 +186,7 @@ class ElasticSearchVector(BaseVector):
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
logger.info(f"Collection {self._collection_name} already exists.")
|
||||
logger.info("Collection %s already exists.", self._collection_name)
|
||||
return
|
||||
|
||||
if not self._client.indices.exists(index=self._collection_name):
|
||||
|
||||
@@ -164,7 +164,7 @@ class HuaweiCloudVector(BaseVector):
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
logger.info(f"Collection {self._collection_name} already exists.")
|
||||
logger.info("Collection %s already exists.", self._collection_name)
|
||||
return
|
||||
|
||||
if not self._client.indices.exists(index=self._collection_name):
|
||||
|
||||
@@ -89,7 +89,7 @@ class LindormVectorStore(BaseVector):
|
||||
timeout: int = 60,
|
||||
**kwargs,
|
||||
):
|
||||
logger.info(f"Total documents to add: {len(documents)}")
|
||||
logger.info("Total documents to add: %s", len(documents))
|
||||
uuids = self._get_uuids(documents)
|
||||
|
||||
total_docs = len(documents)
|
||||
@@ -147,7 +147,7 @@ class LindormVectorStore(BaseVector):
|
||||
time.sleep(0.5)
|
||||
|
||||
except Exception:
|
||||
logger.exception(f"Failed to process batch {batch_num + 1}")
|
||||
logger.exception("Failed to process batch %s", batch_num + 1)
|
||||
raise
|
||||
|
||||
def get_ids_by_metadata_field(self, key: str, value: str):
|
||||
@@ -180,7 +180,7 @@ class LindormVectorStore(BaseVector):
|
||||
|
||||
# 1. First check if collection exists
|
||||
if not self._client.indices.exists(index=self._collection_name):
|
||||
logger.warning(f"Collection {self._collection_name} does not exist")
|
||||
logger.warning("Collection %s does not exist", self._collection_name)
|
||||
return
|
||||
|
||||
# 2. Batch process deletions
|
||||
@@ -196,7 +196,7 @@ class LindormVectorStore(BaseVector):
|
||||
}
|
||||
)
|
||||
else:
|
||||
logger.warning(f"DELETE BY ID: ID {id} does not exist in the index.")
|
||||
logger.warning("DELETE BY ID: ID %s does not exist in the index.", id)
|
||||
|
||||
# 3. Perform bulk deletion if there are valid documents to delete
|
||||
if actions:
|
||||
@@ -209,9 +209,9 @@ class LindormVectorStore(BaseVector):
|
||||
doc_id = delete_error.get("_id")
|
||||
|
||||
if status == 404:
|
||||
logger.warning(f"Document not found for deletion: {doc_id}")
|
||||
logger.warning("Document not found for deletion: %s", doc_id)
|
||||
else:
|
||||
logger.exception(f"Error deleting document: {error}")
|
||||
logger.exception("Error deleting document: %s", error)
|
||||
|
||||
def delete(self) -> None:
|
||||
if self._using_ugc:
|
||||
@@ -225,7 +225,7 @@ class LindormVectorStore(BaseVector):
|
||||
self._client.indices.delete(index=self._collection_name, params={"timeout": 60})
|
||||
logger.info("Delete index success")
|
||||
else:
|
||||
logger.warning(f"Index '{self._collection_name}' does not exist. No deletion performed.")
|
||||
logger.warning("Index '%s' does not exist. No deletion performed.", self._collection_name)
|
||||
|
||||
def text_exists(self, id: str) -> bool:
|
||||
try:
|
||||
@@ -257,7 +257,7 @@ class LindormVectorStore(BaseVector):
|
||||
params["routing"] = self._routing # type: ignore
|
||||
response = self._client.search(index=self._collection_name, body=query, params=params)
|
||||
except Exception:
|
||||
logger.exception(f"Error executing vector search, query: {query}")
|
||||
logger.exception("Error executing vector search, query: %s", query)
|
||||
raise
|
||||
|
||||
docs_and_scores = []
|
||||
@@ -324,10 +324,10 @@ class LindormVectorStore(BaseVector):
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
logger.info(f"Collection {self._collection_name} already exists.")
|
||||
logger.info("Collection %s already exists.", self._collection_name)
|
||||
return
|
||||
if self._client.indices.exists(index=self._collection_name):
|
||||
logger.info(f"{self._collection_name.lower()} already exists.")
|
||||
logger.info("%s already exists.", self._collection_name.lower())
|
||||
redis_client.set(collection_exist_cache_key, 1, ex=3600)
|
||||
return
|
||||
if len(self.kwargs) == 0 and len(kwargs) != 0:
|
||||
|
||||
@@ -103,7 +103,7 @@ class MilvusVector(BaseVector):
|
||||
# For standard Milvus installations, check version number
|
||||
return version.parse(milvus_version).base_version >= version.parse("2.5.0").base_version
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to check Milvus version: {str(e)}. Disabling hybrid search.")
|
||||
logger.warning("Failed to check Milvus version: %s. Disabling hybrid search.", str(e))
|
||||
return False
|
||||
|
||||
def get_type(self) -> str:
|
||||
@@ -289,9 +289,9 @@ class MilvusVector(BaseVector):
|
||||
"""
|
||||
Create a new collection in Milvus with the specified schema and index parameters.
|
||||
"""
|
||||
lock_name = "vector_indexing_lock_{}".format(self._collection_name)
|
||||
lock_name = f"vector_indexing_lock_{self._collection_name}"
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = "vector_indexing_{}".format(self._collection_name)
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
return
|
||||
# Grab the existing collection if it exists
|
||||
|
||||
@@ -53,7 +53,7 @@ class MyScaleVector(BaseVector):
|
||||
return self.add_texts(documents=texts, embeddings=embeddings, **kwargs)
|
||||
|
||||
def _create_collection(self, dimension: int):
|
||||
logging.info(f"create MyScale collection {self._collection_name} with dimension {dimension}")
|
||||
logging.info("create MyScale collection %s with dimension %s", self._collection_name, dimension)
|
||||
self._client.command(f"CREATE DATABASE IF NOT EXISTS {self._config.database}")
|
||||
fts_params = f"('{self._config.fts_params}')" if self._config.fts_params else ""
|
||||
sql = f"""
|
||||
@@ -151,7 +151,7 @@ class MyScaleVector(BaseVector):
|
||||
for r in self._client.query(sql).named_results()
|
||||
]
|
||||
except Exception as e:
|
||||
logging.exception(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") # noqa:TRY401
|
||||
logging.exception("\033[91m\033[1m%s\033[0m \033[95m%s\033[0m", type(e), str(e)) # noqa:TRY401
|
||||
return []
|
||||
|
||||
def delete(self) -> None:
|
||||
|
||||
@@ -147,7 +147,7 @@ class OceanBaseVector(BaseVector):
|
||||
logger.debug("Current OceanBase version is %s", ob_version)
|
||||
return version.parse(ob_version).base_version >= version.parse("4.3.5.1").base_version
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to check OceanBase version: {str(e)}. Disabling hybrid search.")
|
||||
logger.warning("Failed to check OceanBase version: %s. Disabling hybrid search.", str(e))
|
||||
return False
|
||||
|
||||
def add_texts(self, documents: list[Document], embeddings: list[list[float]], **kwargs):
|
||||
@@ -229,7 +229,7 @@ class OceanBaseVector(BaseVector):
|
||||
|
||||
return docs
|
||||
except Exception as e:
|
||||
logger.warning(f"Failed to fulltext search: {str(e)}.")
|
||||
logger.warning("Failed to fulltext search: %s.", str(e))
|
||||
return []
|
||||
|
||||
def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]:
|
||||
|
||||
@@ -131,7 +131,7 @@ class OpenSearchVector(BaseVector):
|
||||
def delete_by_ids(self, ids: list[str]) -> None:
|
||||
index_name = self._collection_name.lower()
|
||||
if not self._client.indices.exists(index=index_name):
|
||||
logger.warning(f"Index {index_name} does not exist")
|
||||
logger.warning("Index %s does not exist", index_name)
|
||||
return
|
||||
|
||||
# Obtaining All Actual Documents_ID
|
||||
@@ -142,7 +142,7 @@ class OpenSearchVector(BaseVector):
|
||||
if es_ids:
|
||||
actual_ids.extend(es_ids)
|
||||
else:
|
||||
logger.warning(f"Document with metadata doc_id {doc_id} not found for deletion")
|
||||
logger.warning("Document with metadata doc_id %s not found for deletion", doc_id)
|
||||
|
||||
if actual_ids:
|
||||
actions = [{"_op_type": "delete", "_index": index_name, "_id": es_id} for es_id in actual_ids]
|
||||
@@ -155,9 +155,9 @@ class OpenSearchVector(BaseVector):
|
||||
doc_id = delete_error.get("_id")
|
||||
|
||||
if status == 404:
|
||||
logger.warning(f"Document not found for deletion: {doc_id}")
|
||||
logger.warning("Document not found for deletion: %s", doc_id)
|
||||
else:
|
||||
logger.exception(f"Error deleting document: {error}")
|
||||
logger.exception("Error deleting document: %s", error)
|
||||
|
||||
def delete(self) -> None:
|
||||
self._client.indices.delete(index=self._collection_name.lower())
|
||||
@@ -198,7 +198,7 @@ class OpenSearchVector(BaseVector):
|
||||
try:
|
||||
response = self._client.search(index=self._collection_name.lower(), body=query)
|
||||
except Exception as e:
|
||||
logger.exception(f"Error executing vector search, query: {query}")
|
||||
logger.exception("Error executing vector search, query: %s", query)
|
||||
raise
|
||||
|
||||
docs = []
|
||||
@@ -242,7 +242,7 @@ class OpenSearchVector(BaseVector):
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name.lower()}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
logger.info(f"Collection {self._collection_name.lower()} already exists.")
|
||||
logger.info("Collection %s already exists.", self._collection_name.lower())
|
||||
return
|
||||
|
||||
if not self._client.indices.exists(index=self._collection_name.lower()):
|
||||
@@ -272,7 +272,7 @@ class OpenSearchVector(BaseVector):
|
||||
},
|
||||
}
|
||||
|
||||
logger.info(f"Creating OpenSearch index {self._collection_name.lower()}")
|
||||
logger.info("Creating OpenSearch index %s", self._collection_name.lower())
|
||||
self._client.indices.create(index=self._collection_name.lower(), body=index_body)
|
||||
|
||||
redis_client.set(collection_exist_cache_key, 1, ex=3600)
|
||||
|
||||
@@ -82,9 +82,9 @@ class PGVectoRS(BaseVector):
|
||||
self.add_texts(texts, embeddings)
|
||||
|
||||
def create_collection(self, dimension: int):
|
||||
lock_name = "vector_indexing_lock_{}".format(self._collection_name)
|
||||
lock_name = f"vector_indexing_lock_{self._collection_name}"
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = "vector_indexing_{}".format(self._collection_name)
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
return
|
||||
index_name = f"{self._collection_name}_embedding_index"
|
||||
|
||||
@@ -155,7 +155,7 @@ class PGVector(BaseVector):
|
||||
cur.execute(f"DELETE FROM {self.table_name} WHERE id IN %s", (tuple(ids),))
|
||||
except psycopg2.errors.UndefinedTable:
|
||||
# table not exists
|
||||
logging.warning(f"Table {self.table_name} not found, skipping delete operation.")
|
||||
logging.warning("Table %s not found, skipping delete operation.", self.table_name)
|
||||
return
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
||||
@@ -95,9 +95,9 @@ class QdrantVector(BaseVector):
|
||||
self.add_texts(texts, embeddings, **kwargs)
|
||||
|
||||
def create_collection(self, collection_name: str, vector_size: int):
|
||||
lock_name = "vector_indexing_lock_{}".format(collection_name)
|
||||
lock_name = f"vector_indexing_lock_{collection_name}"
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = "vector_indexing_{}".format(self._collection_name)
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
return
|
||||
collection_name = collection_name or uuid.uuid4().hex
|
||||
|
||||
@@ -70,9 +70,9 @@ class RelytVector(BaseVector):
|
||||
self.add_texts(texts, embeddings)
|
||||
|
||||
def create_collection(self, dimension: int):
|
||||
lock_name = "vector_indexing_lock_{}".format(self._collection_name)
|
||||
lock_name = f"vector_indexing_lock_{self._collection_name}"
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = "vector_indexing_{}".format(self._collection_name)
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
return
|
||||
index_name = f"{self._collection_name}_embedding_index"
|
||||
|
||||
@@ -142,7 +142,7 @@ class TableStoreVector(BaseVector):
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
logging.info(f"Collection {self._collection_name} already exists.")
|
||||
logging.info("Collection %s already exists.", self._collection_name)
|
||||
return
|
||||
|
||||
self._create_table_if_not_exist()
|
||||
|
||||
@@ -92,9 +92,9 @@ class TencentVector(BaseVector):
|
||||
|
||||
def _create_collection(self, dimension: int) -> None:
|
||||
self._dimension = dimension
|
||||
lock_name = "vector_indexing_lock_{}".format(self._collection_name)
|
||||
lock_name = f"vector_indexing_lock_{self._collection_name}"
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = "vector_indexing_{}".format(self._collection_name)
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
return
|
||||
|
||||
|
||||
@@ -104,9 +104,9 @@ class TidbOnQdrantVector(BaseVector):
|
||||
self.add_texts(texts, embeddings, **kwargs)
|
||||
|
||||
def create_collection(self, collection_name: str, vector_size: int):
|
||||
lock_name = "vector_indexing_lock_{}".format(collection_name)
|
||||
lock_name = f"vector_indexing_lock_{collection_name}"
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = "vector_indexing_{}".format(self._collection_name)
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
return
|
||||
collection_name = collection_name or uuid.uuid4().hex
|
||||
|
||||
@@ -91,9 +91,9 @@ class TiDBVector(BaseVector):
|
||||
|
||||
def _create_collection(self, dimension: int):
|
||||
logger.info("_create_collection, collection_name " + self._collection_name)
|
||||
lock_name = "vector_indexing_lock_{}".format(self._collection_name)
|
||||
lock_name = f"vector_indexing_lock_{self._collection_name}"
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = "vector_indexing_{}".format(self._collection_name)
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
return
|
||||
tidb_dist_func = self._get_distance_func()
|
||||
@@ -192,7 +192,7 @@ class TiDBVector(BaseVector):
|
||||
query_vector_str = ", ".join(format(x) for x in query_vector)
|
||||
query_vector_str = "[" + query_vector_str + "]"
|
||||
logger.debug(
|
||||
f"_collection_name: {self._collection_name}, score_threshold: {score_threshold}, distance: {distance}"
|
||||
"_collection_name: %s, score_threshold: %s, distance: %s", self._collection_name, score_threshold, distance
|
||||
)
|
||||
|
||||
docs = []
|
||||
|
||||
@@ -178,19 +178,19 @@ class Vector:
|
||||
def create(self, texts: Optional[list] = None, **kwargs):
|
||||
if texts:
|
||||
start = time.time()
|
||||
logger.info(f"start embedding {len(texts)} texts {start}")
|
||||
logger.info("start embedding %s texts %s", len(texts), start)
|
||||
batch_size = 1000
|
||||
total_batches = len(texts) + batch_size - 1
|
||||
for i in range(0, len(texts), batch_size):
|
||||
batch = texts[i : i + batch_size]
|
||||
batch_start = time.time()
|
||||
logger.info(f"Processing batch {i // batch_size + 1}/{total_batches} ({len(batch)} texts)")
|
||||
logger.info("Processing batch %s/%s (%s texts)", i // batch_size + 1, total_batches, len(batch))
|
||||
batch_embeddings = self._embeddings.embed_documents([document.page_content for document in batch])
|
||||
logger.info(
|
||||
f"Embedding batch {i // batch_size + 1}/{total_batches} took {time.time() - batch_start:.3f}s"
|
||||
"Embedding batch %s/%s took %s s", i // batch_size + 1, total_batches, time.time() - batch_start
|
||||
)
|
||||
self._vector_processor.create(texts=batch, embeddings=batch_embeddings, **kwargs)
|
||||
logger.info(f"Embedding {len(texts)} texts took {time.time() - start:.3f}s")
|
||||
logger.info("Embedding %s texts took %s s", len(texts), time.time() - start)
|
||||
|
||||
def add_texts(self, documents: list[Document], **kwargs):
|
||||
if kwargs.get("duplicate_check", False):
|
||||
@@ -219,7 +219,7 @@ class Vector:
|
||||
self._vector_processor.delete()
|
||||
# delete collection redis cache
|
||||
if self._vector_processor.collection_name:
|
||||
collection_exist_cache_key = "vector_indexing_{}".format(self._vector_processor.collection_name)
|
||||
collection_exist_cache_key = f"vector_indexing_{self._vector_processor.collection_name}"
|
||||
redis_client.delete(collection_exist_cache_key)
|
||||
|
||||
def _get_embeddings(self) -> Embeddings:
|
||||
|
||||
@@ -92,9 +92,9 @@ class WeaviateVector(BaseVector):
|
||||
self.add_texts(texts, embeddings)
|
||||
|
||||
def _create_collection(self):
|
||||
lock_name = "vector_indexing_lock_{}".format(self._collection_name)
|
||||
lock_name = f"vector_indexing_lock_{self._collection_name}"
|
||||
with redis_client.lock(lock_name, timeout=20):
|
||||
collection_exist_cache_key = "vector_indexing_{}".format(self._collection_name)
|
||||
collection_exist_cache_key = f"vector_indexing_{self._collection_name}"
|
||||
if redis_client.get(collection_exist_cache_key):
|
||||
return
|
||||
schema = self._default_schema(self._collection_name)
|
||||
|
||||
@@ -69,7 +69,7 @@ class CacheEmbedding(Embeddings):
|
||||
# stackoverflow best way: https://stackoverflow.com/questions/20319813/how-to-check-list-containing-nan
|
||||
if np.isnan(normalized_embedding).any():
|
||||
# for issue #11827 float values are not json compliant
|
||||
logger.warning(f"Normalized embedding is nan: {normalized_embedding}")
|
||||
logger.warning("Normalized embedding is nan: %s", normalized_embedding)
|
||||
continue
|
||||
embedding_queue_embeddings.append(normalized_embedding)
|
||||
except IntegrityError:
|
@@ -122,7 +122,7 @@ class CacheEmbedding(Embeddings):
raise ValueError("Normalized embedding is nan please try again")
except Exception as ex:
if dify_config.DEBUG:
logging.exception(f"Failed to embed query text '{text[:10]}...({len(text)} chars)'")
logging.exception("Failed to embed query text '%s...(%s chars)'", text[:10], len(text))
raise ex

try:

@@ -136,7 +136,9 @@ class CacheEmbedding(Embeddings):
redis_client.setex(embedding_cache_key, 600, encoded_str)
except Exception as ex:
if dify_config.DEBUG:
logging.exception(f"Failed to add embedding to redis for the text '{text[:10]}...({len(text)} chars)'")
logging.exception(
"Failed to add embedding to redis for the text '%s...(%s chars)'", text[:10], len(text)
)
raise ex

return embedding_results # type: ignore

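The setex call above caches a query embedding for ten minutes so repeated queries skip the model call. A rough sketch of one way to round-trip an embedding through Redis as base64-encoded float32 bytes (the exact encoding in CacheEmbedding may differ; this only illustrates the TTL-cache shape):

import base64

import numpy as np
import redis

redis_client = redis.Redis()  # assumes a reachable local Redis


def cache_embedding(key: str, embedding: list[float]) -> None:
    raw = np.array(embedding, dtype=np.float32).tobytes()
    redis_client.setex(key, 600, base64.b64encode(raw))  # expire after 10 minutes


def load_embedding(key: str) -> list[float] | None:
    cached = redis_client.get(key)
    if cached is None:
        return None  # cache miss: recompute via the embedding model
    return np.frombuffer(base64.b64decode(cached), dtype=np.float32).tolist()
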
@@ -116,7 +116,7 @@ class TextSplitter(BaseDocumentTransformer, ABC):
if total + _len + (separator_len if len(current_doc) > 0 else 0) > self._chunk_size:
if total > self._chunk_size:
logger.warning(
f"Created a chunk of size {total}, which is longer than the specified {self._chunk_size}"
"Created a chunk of size %s, which is longer than the specified %s", total, self._chunk_size
)
if len(current_doc) > 0:
doc = self._join_docs(current_doc, separator)

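For readers unfamiliar with the splitter logic around this warning: splits accumulate into a chunk until adding the next one (plus separator) would exceed chunk_size, and a single split that is already longer than chunk_size cannot be shrunk, so it is emitted oversized with a warning. A simplified sketch of that merge loop, using plain strings rather than the TextSplitter class:

import logging

logger = logging.getLogger(__name__)


def merge_splits(splits: list[str], separator: str, chunk_size: int) -> list[str]:
    chunks: list[str] = []
    current: list[str] = []
    total = 0
    for split in splits:
        extra = len(separator) if current else 0
        if total + len(split) + extra > chunk_size and current:
            if total > chunk_size:
                logger.warning(
                    "Created a chunk of size %s, which is longer than the specified %s", total, chunk_size
                )
            chunks.append(separator.join(current))
            current, total = [], 0
        current.append(split)
        total += len(split) + (len(separator) if len(current) > 1 else 0)
    if current:
        chunks.append(separator.join(current))
    return chunks


print(merge_splits(["aaa", "bbb", "ccc"], " ", 7))  # ['aaa bbb', 'ccc']
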
@@ -153,7 +153,7 @@ class DifyCoreRepositoryFactory:
RepositoryImportError: If the configured repository cannot be created
"""
class_path = dify_config.CORE_WORKFLOW_EXECUTION_REPOSITORY
logger.debug(f"Creating WorkflowExecutionRepository from: {class_path}")
logger.debug("Creating WorkflowExecutionRepository from: %s", class_path)

try:
repository_class = cls._import_class(class_path)

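The factory resolves the repository class from a dotted path held in configuration. The usual way to implement a helper like _import_class is importlib plus getattr; a minimal sketch under that assumption (the error type here is illustrative, not the factory's actual exception):

import importlib


def import_class(class_path: str) -> type:
    """Load a class from a dotted path such as 'package.module.ClassName'."""
    module_path, _, class_name = class_path.rpartition(".")
    try:
        module = importlib.import_module(module_path)
        return getattr(module, class_name)
    except (ImportError, AttributeError) as e:
        raise RuntimeError(f"Cannot import repository class '{class_path}'") from e


print(import_class("collections.OrderedDict"))  # <class 'collections.OrderedDict'>
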
@@ -199,7 +199,7 @@ class DifyCoreRepositoryFactory:
RepositoryImportError: If the configured repository cannot be created
"""
class_path = dify_config.CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY
logger.debug(f"Creating WorkflowNodeExecutionRepository from: {class_path}")
logger.debug("Creating WorkflowNodeExecutionRepository from: %s", class_path)

try:
repository_class = cls._import_class(class_path)

@@ -203,5 +203,5 @@ class SQLAlchemyWorkflowExecutionRepository(WorkflowExecutionRepository):
session.commit()

# Update the in-memory cache for faster subsequent lookups
logger.debug(f"Updating cache for execution_id: {db_model.id}")
logger.debug("Updating cache for execution_id: %s", db_model.id)
self._execution_cache[db_model.id] = db_model

@@ -215,7 +215,7 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository)
# Update the in-memory cache for faster subsequent lookups
# Only cache if we have a node_execution_id to use as the cache key
if db_model.node_execution_id:
logger.debug(f"Updating cache for node_execution_id: {db_model.node_execution_id}")
logger.debug("Updating cache for node_execution_id: %s", db_model.node_execution_id)
self._node_execution_cache[db_model.node_execution_id] = db_model

def get_db_models_by_workflow_run(

@@ -206,7 +206,7 @@ class ToolManager:
)
except Exception as e:
builtin_provider = None
logger.info(f"Error getting builtin provider {credential_id}:{e}", exc_info=True)
logger.info("Error getting builtin provider %s:%s", credential_id, e, exc_info=True)
# if the provider has been deleted, raise an error
if builtin_provider is None:
raise ToolProviderNotFoundError(f"provider has been deleted: {credential_id}")

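Worth noting in the ToolManager hunk: exc_info=True keeps working unchanged alongside %-style arguments, and it attaches the current traceback to an INFO-level record, whereas logger.exception always logs at ERROR. A quick illustration (the credential id is made up):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")

try:
    1 / 0
except Exception as e:
    # INFO-level record with the traceback attached
    logger.info("Error getting builtin provider %s:%s", "cred-123", e, exc_info=True)
    # same traceback handling, but always emitted at ERROR level
    logger.exception("Error getting builtin provider %s", "cred-123")
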
@@ -569,7 +569,7 @@ class ToolManager:
yield provider

except Exception:
logger.exception(f"load builtin provider {provider_path}")
logger.exception("load builtin provider %s", provider_path)
continue
# set builtin providers loaded
cls._builtin_providers_loaded = True

@@ -55,7 +55,7 @@ def get_url(url: str, user_agent: Optional[str] = None) -> str:
main_content_type = mimetypes.guess_type(filename)[0]

if main_content_type not in supported_content_types:
return "Unsupported content-type [{}] of URL.".format(main_content_type)
return f"Unsupported content-type [{main_content_type}] of URL."

if main_content_type in extract_processor.SUPPORT_URL_CONTENT_TYPES:
return cast(str, ExtractProcessor.load_from_url(url, return_text=True))

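get_url infers the content type from the filename before deciding whether the URL is worth fetching. mimetypes.guess_type accepts names or URLs and returns a (type, encoding) pair; a short sketch of the same gate, with an illustrative allow-list rather than Dify's actual supported_content_types:

import mimetypes

SUPPORTED_CONTENT_TYPES = {"text/html", "text/plain", "application/pdf"}  # illustrative list


def check_content_type(filename: str) -> str:
    main_content_type = mimetypes.guess_type(filename)[0]
    if main_content_type not in SUPPORTED_CONTENT_TYPES:
        return f"Unsupported content-type [{main_content_type}] of URL."
    return f"OK: {main_content_type}"


print(check_content_type("report.pdf"))   # OK: application/pdf
print(check_content_type("archive.zip"))  # Unsupported content-type [application/zip] of URL.
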
@@ -67,7 +67,7 @@ def get_url(url: str, user_agent: Optional[str] = None) -> str:
response = scraper.get(url, headers=headers, follow_redirects=True, timeout=(120, 300)) # type: ignore

if response.status_code != 200:
return "URL returned status code {}.".format(response.status_code)
return f"URL returned status code {response.status_code}."

# Detect encoding using chardet
detected_encoding = chardet.detect(response.content)

@@ -194,7 +194,7 @@ class WorkflowTool(Tool):

files.append(file_dict)
except Exception:
logger.exception(f"Failed to transform file {file}")
logger.exception("Failed to transform file %s", file)
else:
parameters_result[parameter.name] = tool_parameters.get(parameter.name)

@@ -238,13 +238,13 @@ class GraphEngine:
while True:
# max steps reached
if self.graph_runtime_state.node_run_steps > self.max_execution_steps:
raise GraphRunFailedError("Max steps {} reached.".format(self.max_execution_steps))
raise GraphRunFailedError(f"Max steps {self.max_execution_steps} reached.")

# or max execution time reached
if self._is_timed_out(
start_at=self.graph_runtime_state.start_at, max_execution_time=self.max_execution_time
):
raise GraphRunFailedError("Max execution time {}s reached.".format(self.max_execution_time))
raise GraphRunFailedError(f"Max execution time {self.max_execution_time}s reached.")

# init route node state
route_node_state = self.graph_runtime_state.node_run_state.create_node_state(node_id=next_node_id)

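The GraphEngine hunk shows the two runaway guards around the main dispatch loop: a step budget and a wall-clock budget. A condensed standalone sketch of the same guards (names and limits are illustrative, not the engine's real defaults):

import time


class GraphRunFailedError(Exception):
    pass


def run_steps(steps, max_execution_steps: int = 500, max_execution_time: float = 600.0):
    start_at = time.perf_counter()
    for step_count, step in enumerate(steps, start=1):
        if step_count > max_execution_steps:
            raise GraphRunFailedError(f"Max steps {max_execution_steps} reached.")
        if time.perf_counter() - start_at > max_execution_time:
            raise GraphRunFailedError(f"Max execution time {max_execution_time}s reached.")
        step()  # run one node of the graph
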
@@ -377,7 +377,7 @@ class GraphEngine:

edge = cast(GraphEdge, sub_edge_mappings[0])
if edge.run_condition is None:
logger.warning(f"Edge {edge.target_node_id} run condition is None")
logger.warning("Edge %s run condition is None", edge.target_node_id)
continue

result = ConditionManager.get_condition_handler(

@@ -848,7 +848,7 @@ class GraphEngine:
)
return
except Exception as e:
logger.exception(f"Node {node.title} run failed")
logger.exception("Node %s run failed", node.title)
raise e

def _append_variables_recursively(self, node_id: str, variable_key_list: list[str], variable_value: VariableValue):

@@ -36,7 +36,7 @@ class StreamProcessor(ABC):
reachable_node_ids: list[str] = []
unreachable_first_node_ids: list[str] = []
if finished_node_id not in self.graph.edge_mapping:
logger.warning(f"node {finished_node_id} has no edge mapping")
logger.warning("node %s has no edge mapping", finished_node_id)
return
for edge in self.graph.edge_mapping[finished_node_id]:
if (

@@ -65,7 +65,7 @@ class BaseNode:
try:
result = self._run()
except Exception as e:
logger.exception(f"Node {self.node_id} failed to run")
logger.exception("Node %s failed to run", self.node_id)
result = NodeRunResult(
status=WorkflowNodeExecutionStatus.FAILED,
error=str(e),

@@ -363,7 +363,7 @@ def _extract_text_from_docx(file_content: bytes) -> str:

text.append(markdown_table)
except Exception as e:
logger.warning(f"Failed to extract table from DOC: {e}")
logger.warning("Failed to extract table from DOC: %s", e)
continue

return "\n".join(text)

@@ -129,7 +129,7 @@ class HttpRequestNode(BaseNode):
},
)
except HttpRequestNodeError as e:
logger.warning(f"http request node {self.node_id} failed to run: {e}")
logger.warning("http request node %s failed to run: %s", self.node_id, e)
return NodeRunResult(
status=WorkflowNodeExecutionStatus.FAILED,
error=str(e),

@@ -129,7 +129,7 @@ class IfElseNode(BaseNode):
var_mapping: dict[str, list[str]] = {}
for case in typed_node_data.cases or []:
for condition in case.conditions:
key = "{}.#{}#".format(node_id, ".".join(condition.variable_selector))
key = f"{node_id}.#{'.'.join(condition.variable_selector)}#"
var_mapping[key] = condition.variable_selector

return var_mapping

@@ -616,7 +616,7 @@ class IterationNode(BaseNode):
)

except IterationNodeError as e:
logger.warning(f"Iteration run failed:{str(e)}")
logger.warning("Iteration run failed:%s", str(e))
yield IterationRunFailedEvent(
iteration_id=self.id,
iteration_node_id=self.node_id,

@@ -670,7 +670,7 @@ class ParameterExtractorNode(BaseNode):
return cast(dict, json.loads(json_str))
except Exception:
pass
logger.info(f"extra error: {result}")
logger.info("extra error: %s", result)
return None

def _extract_json_from_tool_call(self, tool_call: AssistantPromptMessage.ToolCall) -> Optional[dict]:

@@ -690,7 +690,7 @@ class ParameterExtractorNode(BaseNode):
return cast(dict, json.loads(json_str))
except Exception:
pass
logger.info(f"extra error: {result}")
logger.info("extra error: %s", result)
return None

def _generate_default_result(self, data: ParameterExtractorNodeData) -> dict:

@@ -67,7 +67,7 @@ class WorkflowEntry:
# check call depth
workflow_call_max_depth = dify_config.WORKFLOW_CALL_MAX_DEPTH
if call_depth > workflow_call_max_depth:
raise ValueError("Max workflow call depth {} reached.".format(workflow_call_max_depth))
raise ValueError(f"Max workflow call depth {workflow_call_max_depth} reached.")

# init workflow run state
graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter())

@@ -193,7 +193,13 @@ class WorkflowEntry:
# run node
generator = node.run()
except Exception as e:
logger.exception(f"error while running node, {workflow.id=}, {node.id=}, {node.type_=}, {node.version()=}")
logger.exception(
"error while running node, workflow_id=%s, node_id=%s, node_type=%s, node_version=%s",
workflow.id,
node.id,
node.type_,
node.version(),
)
raise WorkflowNodeRunFailedError(node=node, err_msg=str(e))
return node, generator

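The WorkflowEntry hunks also change the message shape: the old f-strings used the {expr=} debug specifier, which renders both the expression text and its value, so the %-style version spells the labels out explicitly. A tiny illustration with made-up values:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("demo")

node_id = "node-42"
node_type = "http-request"

# f-string debug specifier: renders as "node_id='node-42', node_type='http-request'"
logger.error(f"error while running node, {node_id=}, {node_type=}")

# lazy %-style equivalent: labels written out by hand, formatting deferred
logger.error("error while running node, node_id=%s, node_type=%s", node_id, node_type)
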
@@ -297,7 +303,12 @@ class WorkflowEntry:

return node, generator
except Exception as e:
logger.exception(f"error while running node, {node.id=}, {node.type_=}, {node.version()=}")
logger.exception(
"error while running node, node_id=%s, node_type=%s, node_version=%s",
node.id,
node.type_,
node.version(),
)
raise WorkflowNodeRunFailedError(node=node, err_msg=str(e))

@staticmethod