mirror of https://github.com/langgenius/dify.git, synced 2025-12-19 17:27:16 -05:00
ruff check preview (#25653)
Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
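
This commit applies ruff's preview-mode autofixes across the benchmark/stress-test scripts. The changes are mechanical: imports are sorted and merged (I001-style grouping: stdlib, blank line, third party), __all__ is sorted (RUF022), redundant open(..., "r") modes are dropped (UP015), the legacy IOError alias becomes OSError (UP024), Iterator moves from typing to collections.abc (UP035), single quotes become double quotes, and calls that fit the configured line length are joined onto one line (with a few over-long lines split instead). The rule codes are the usual ruff designations for these fixes, inferred from the diff rather than stated in the commit message.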
@@ -51,9 +51,7 @@ def cleanup() -> None:
     if sys.stdin.isatty():
         log.separator()
         log.warning("This action cannot be undone!")
-        confirmation = input(
-            "Are you sure you want to remove all config and report files? (yes/no): "
-        )
+        confirmation = input("Are you sure you want to remove all config and report files? (yes/no): ")

         if confirmation.lower() not in ["yes", "y"]:
             log.error("Cleanup cancelled.")
@@ -3,4 +3,4 @@
 from .config_helper import config_helper
 from .logger_helper import Logger, ProgressLogger

-__all__ = ["config_helper", "Logger", "ProgressLogger"]
+__all__ = ["Logger", "ProgressLogger", "config_helper"]
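
Note: the __all__ reordering above looks like ruff's RUF022 (unsorted-dunder-all) autofix, which sorts entries case-sensitively, so the uppercase Logger and ProgressLogger land before config_helper. A minimal sketch of the same ordering:

    # Case-sensitive sort: uppercase letters order before lowercase ones.
    names = ["config_helper", "Logger", "ProgressLogger"]
    print(sorted(names))  # ['Logger', 'ProgressLogger', 'config_helper']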
@@ -65,9 +65,7 @@ class ConfigHelper:
             return None

         try:
-            with open(config_path, "r") as f:
+            with open(config_path) as f:
                 return json.load(f)
-        except (json.JSONDecodeError, IOError) as e:
+        except (OSError, json.JSONDecodeError) as e:
             print(f"❌ Error reading {filename}: {e}")
             return None
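
Two fixes combine in the hunk above: open(config_path, "r") drops the mode argument because "r" is open()'s default (ruff rule UP015), and IOError becomes OSError (UP024). The rename is safe: since Python 3.3, IOError is a plain alias of OSError. A quick sketch:

    # IOError and OSError are the same class on any modern Python.
    print(IOError is OSError)  # True

    try:
        open("/nonexistent/path")
    except OSError as e:  # catches everything IOError used to catch
        print(type(e).__name__)  # FileNotFoundError (an OSError subclass)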
@@ -101,7 +101,7 @@ class ConfigHelper:
             with open(config_path, "w") as f:
                 json.dump(data, f, indent=2)
             return True
-        except IOError as e:
+        except OSError as e:
             print(f"❌ Error writing {filename}: {e}")
             return False
@@ -133,7 +133,7 @@ class ConfigHelper:
         try:
             config_path.unlink()
             return True
-        except IOError as e:
+        except OSError as e:
             print(f"❌ Error deleting {filename}: {e}")
             return False
@@ -148,9 +148,9 @@ class ConfigHelper:
             return None

         try:
-            with open(state_path, "r") as f:
+            with open(state_path) as f:
                 return json.load(f)
-        except (json.JSONDecodeError, IOError) as e:
+        except (OSError, json.JSONDecodeError) as e:
             print(f"❌ Error reading {self.state_file}: {e}")
             return None
@@ -170,7 +170,7 @@ class ConfigHelper:
             with open(state_path, "w") as f:
                 json.dump(data, f, indent=2)
             return True
-        except IOError as e:
+        except OSError as e:
             print(f"❌ Error writing {self.state_file}: {e}")
             return False
@@ -159,9 +159,7 @@ class ProgressLogger:

         if self.logger.use_colors:
             progress_bar = self._create_progress_bar()
-            print(
-                f"\n\033[1m[Step {self.current_step}/{self.total_steps}]\033[0m {progress_bar}"
-            )
+            print(f"\n\033[1m[Step {self.current_step}/{self.total_steps}]\033[0m {progress_bar}")
             self.logger.step(f"{description} (Elapsed: {elapsed:.1f}s)")
         else:
             print(f"\n[Step {self.current_step}/{self.total_steps}]")
@@ -6,8 +6,7 @@ from pathlib import Path
 sys.path.append(str(Path(__file__).parent.parent))

 import httpx
-from common import config_helper
-from common import Logger
+from common import Logger, config_helper


 def configure_openai_plugin() -> None:
@@ -72,29 +71,19 @@ def configure_openai_plugin() -> None:

         if response.status_code == 200:
             log.success("OpenAI plugin configured successfully!")
-            log.key_value(
-                "API Base", config_payload["credentials"]["openai_api_base"]
-            )
-            log.key_value(
-                "API Key", config_payload["credentials"]["openai_api_key"]
-            )
+            log.key_value("API Base", config_payload["credentials"]["openai_api_base"])
+            log.key_value("API Key", config_payload["credentials"]["openai_api_key"])

         elif response.status_code == 201:
             log.success("OpenAI plugin credentials created successfully!")
-            log.key_value(
-                "API Base", config_payload["credentials"]["openai_api_base"]
-            )
-            log.key_value(
-                "API Key", config_payload["credentials"]["openai_api_key"]
-            )
+            log.key_value("API Base", config_payload["credentials"]["openai_api_base"])
+            log.key_value("API Key", config_payload["credentials"]["openai_api_key"])

         elif response.status_code == 401:
             log.error("Configuration failed: Unauthorized")
             log.info("Token may have expired. Please run login_admin.py again")
         else:
-            log.error(
-                f"Configuration failed with status code: {response.status_code}"
-            )
+            log.error(f"Configuration failed with status code: {response.status_code}")
             log.debug(f"Response: {response.text}")

     except httpx.ConnectError:
@@ -5,10 +5,10 @@ from pathlib import Path

 sys.path.append(str(Path(__file__).parent.parent))

-import httpx
 import json
-from common import config_helper
-from common import Logger
+
+import httpx
+from common import Logger, config_helper


 def create_api_key() -> None:
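
The import shuffles in this and the surrounding files are isort-style sorting (ruff rule I001): standard-library imports (json) come first, a blank line separates them from third-party ones (httpx), duplicate "from common import ..." lines are merged, and names within a from-import are sorted. A sketch of the resulting layout, using the same modules the diff touches:

    # Standard library first, then a blank line, then everything else;
    # the two "from common import ..." lines collapse into one.
    import json

    import httpx
    from common import Logger, config_helper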
@@ -90,9 +90,7 @@ def create_api_key() -> None:
             }

             if config_helper.write_config("api_key_config", api_key_config):
-                log.info(
-                    f"API key saved to: {config_helper.get_config_path('benchmark_state')}"
-                )
+                log.info(f"API key saved to: {config_helper.get_config_path('benchmark_state')}")
         else:
             log.error("No API token received")
             log.debug(f"Response: {json.dumps(response_data, indent=2)}")
@@ -101,9 +99,7 @@ def create_api_key() -> None:
             log.error("API key creation failed: Unauthorized")
             log.info("Token may have expired. Please run login_admin.py again")
         else:
-            log.error(
-                f"API key creation failed with status code: {response.status_code}"
-            )
+            log.error(f"API key creation failed with status code: {response.status_code}")
             log.debug(f"Response: {response.text}")

     except httpx.ConnectError:
@@ -5,9 +5,10 @@ from pathlib import Path

 sys.path.append(str(Path(__file__).parent.parent))

-import httpx
 import json
-from common import config_helper, Logger
+
+import httpx
+from common import Logger, config_helper


 def import_workflow_app() -> None:
@@ -30,7 +31,7 @@ def import_workflow_app() -> None:
         log.error(f"DSL file not found: {dsl_path}")
         return

-    with open(dsl_path, "r") as f:
+    with open(dsl_path) as f:
         yaml_content = f.read()

     log.step("Importing workflow app from DSL...")
@@ -86,9 +87,7 @@ def import_workflow_app() -> None:
             log.success("Workflow app imported successfully!")
             log.key_value("App ID", app_id)
             log.key_value("App Mode", response_data.get("app_mode"))
-            log.key_value(
-                "DSL Version", response_data.get("imported_dsl_version")
-            )
+            log.key_value("DSL Version", response_data.get("imported_dsl_version"))

             # Save app_id to config
             app_config = {
@@ -99,9 +98,7 @@ def import_workflow_app() -> None:
             }

             if config_helper.write_config("app_config", app_config):
-                log.info(
-                    f"App config saved to: {config_helper.get_config_path('benchmark_state')}"
-                )
+                log.info(f"App config saved to: {config_helper.get_config_path('benchmark_state')}")
         else:
             log.error("Import completed but no app_id received")
             log.debug(f"Response: {json.dumps(response_data, indent=2)}")
@@ -5,10 +5,10 @@ from pathlib import Path

 sys.path.append(str(Path(__file__).parent.parent))

-import httpx
 import time
-from common import config_helper
-from common import Logger
+
+import httpx
+from common import Logger, config_helper


 def install_openai_plugin() -> None:
@@ -28,9 +28,7 @@ def install_openai_plugin() -> None:

     # API endpoint for plugin installation
     base_url = "http://localhost:5001"
-    install_endpoint = (
-        f"{base_url}/console/api/workspaces/current/plugin/install/marketplace"
-    )
+    install_endpoint = f"{base_url}/console/api/workspaces/current/plugin/install/marketplace"

     # Plugin identifier
     plugin_payload = {
@@ -83,9 +81,7 @@ def install_openai_plugin() -> None:
             log.info("Polling for task completion...")

             # Poll for task completion
-            task_endpoint = (
-                f"{base_url}/console/api/workspaces/current/plugin/tasks/{task_id}"
-            )
+            task_endpoint = f"{base_url}/console/api/workspaces/current/plugin/tasks/{task_id}"

             max_attempts = 30  # 30 attempts with 2 second delay = 60 seconds max
             attempt = 0
@@ -131,9 +127,7 @@ def install_openai_plugin() -> None:
                         plugins = task_info.get("plugins", [])
                         if plugins:
                             for plugin in plugins:
-                                log.list_item(
-                                    f"{plugin.get('plugin_id')}: {plugin.get('message')}"
-                                )
+                                log.list_item(f"{plugin.get('plugin_id')}: {plugin.get('message')}")
                             break

                         # Continue polling if status is "pending" or other
@@ -149,9 +143,7 @@ def install_openai_plugin() -> None:
             log.warning("Plugin may already be installed")
             log.debug(f"Response: {response.text}")
         else:
-            log.error(
-                f"Installation failed with status code: {response.status_code}"
-            )
+            log.error(f"Installation failed with status code: {response.status_code}")
             log.debug(f"Response: {response.text}")

     except httpx.ConnectError:
@@ -5,10 +5,10 @@ from pathlib import Path

 sys.path.append(str(Path(__file__).parent.parent))

-import httpx
 import json
-from common import config_helper
-from common import Logger
+
+import httpx
+from common import Logger, config_helper


 def login_admin() -> None:
@@ -77,16 +77,10 @@ def login_admin() -> None:

                 # Save token config
                 if config_helper.write_config("token_config", token_config):
-                    log.info(
-                        f"Token saved to: {config_helper.get_config_path('benchmark_state')}"
-                    )
+                    log.info(f"Token saved to: {config_helper.get_config_path('benchmark_state')}")

                     # Show truncated token for verification
-                    token_display = (
-                        f"{access_token[:20]}..."
-                        if len(access_token) > 20
-                        else "Token saved"
-                    )
+                    token_display = f"{access_token[:20]}..." if len(access_token) > 20 else "Token saved"
                     log.key_value("Access token", token_display)

         elif response.status_code == 401:
@@ -3,8 +3,10 @@
 import json
 import time
 import uuid
-from typing import Any, Iterator
-from flask import Flask, request, jsonify, Response
+from collections.abc import Iterator
+from typing import Any
+
+from flask import Flask, Response, jsonify, request

 app = Flask(__name__)

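
The mock server's Iterator import moves from typing to collections.abc (ruff rule UP035): the typing aliases for container ABCs have been deprecated since Python 3.9. A runnable sketch of the modern spelling:

    from collections.abc import Iterator


    def count_up(limit: int) -> Iterator[int]:
        # Generators are annotated with collections.abc.Iterator rather
        # than the deprecated typing.Iterator alias.
        n = 0
        while n < limit:
            yield n
            n += 1


    print(list(count_up(3)))  # [0, 1, 2]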
@@ -5,10 +5,10 @@ from pathlib import Path

 sys.path.append(str(Path(__file__).parent.parent))

-import httpx
 import json
-from common import config_helper
-from common import Logger
+
+import httpx
+from common import Logger, config_helper


 def publish_workflow() -> None:
@@ -79,9 +79,7 @@ def publish_workflow() -> None:
             try:
                 response_data = response.json()
                 if response_data:
-                    log.debug(
-                        f"Response: {json.dumps(response_data, indent=2)}"
-                    )
+                    log.debug(f"Response: {json.dumps(response_data, indent=2)}")
             except json.JSONDecodeError:
                 # Response might be empty or non-JSON
                 pass
@@ -93,9 +91,7 @@ def publish_workflow() -> None:
             log.error("Workflow publish failed: App not found")
             log.info("Make sure the app was imported successfully")
         else:
-            log.error(
-                f"Workflow publish failed with status code: {response.status_code}"
-            )
+            log.error(f"Workflow publish failed with status code: {response.status_code}")
             log.debug(f"Response: {response.text}")

     except httpx.ConnectError:
@@ -5,9 +5,10 @@ from pathlib import Path

 sys.path.append(str(Path(__file__).parent.parent))

-import httpx
 import json
-from common import config_helper, Logger
+
+import httpx
+from common import Logger, config_helper


 def run_workflow(question: str = "fake question", streaming: bool = True) -> None:
@@ -70,9 +71,7 @@ def run_workflow(question: str = "fake question", streaming: bool = True) -> Non
                             event = data.get("event")

                             if event == "workflow_started":
-                                log.progress(
-                                    f"Workflow started: {data.get('data', {}).get('id')}"
-                                )
+                                log.progress(f"Workflow started: {data.get('data', {}).get('id')}")
                             elif event == "node_started":
                                 node_data = data.get("data", {})
                                 log.progress(
@@ -116,9 +115,7 @@ def run_workflow(question: str = "fake question", streaming: bool = True) -> Non
                                 # Some lines might not be JSON
                                 pass
                 else:
-                    log.error(
-                        f"Workflow run failed with status code: {response.status_code}"
-                    )
+                    log.error(f"Workflow run failed with status code: {response.status_code}")
                     log.debug(f"Response: {response.text}")
         else:
             # Handle blocking response
@@ -142,9 +139,7 @@ def run_workflow(question: str = "fake question", streaming: bool = True) -> Non
                 log.info("📤 Final Answer:")
                 log.info(outputs.get("answer"), indent=2)
             else:
-                log.error(
-                    f"Workflow run failed with status code: {response.status_code}"
-                )
+                log.error(f"Workflow run failed with status code: {response.status_code}")
                 log.debug(f"Response: {response.text}")

     except httpx.ConnectError:
@@ -6,7 +6,7 @@ from pathlib import Path
 sys.path.append(str(Path(__file__).parent.parent))

 import httpx
-from common import config_helper, Logger
+from common import Logger, config_helper


 def setup_admin_account() -> None:
@@ -24,9 +24,7 @@ def setup_admin_account() -> None:

     # Save credentials to config file
     if config_helper.write_config("admin_config", admin_config):
-        log.info(
-            f"Admin credentials saved to: {config_helper.get_config_path('benchmark_state')}"
-        )
+        log.info(f"Admin credentials saved to: {config_helper.get_config_path('benchmark_state')}")

     # API setup endpoint
     base_url = "http://localhost:5001"
@@ -56,9 +54,7 @@ def setup_admin_account() -> None:
             log.key_value("Username", admin_config["username"])

         elif response.status_code == 400:
-            log.warning(
-                "Setup may have already been completed or invalid data provided"
-            )
+            log.warning("Setup may have already been completed or invalid data provided")
             log.debug(f"Response: {response.text}")
         else:
             log.error(f"Setup failed with status code: {response.status_code}")
@@ -1,9 +1,9 @@
 #!/usr/bin/env python3

+import socket
 import subprocess
 import sys
 import time
-import socket
 from pathlib import Path

 from common import Logger, ProgressLogger
@@ -93,9 +93,7 @@ def main() -> None:
         if retry.lower() in ["yes", "y"]:
             return main()  # Recursively call main to check again
         else:
-            print(
-                "❌ Setup cancelled. Please start the required services and try again."
-            )
+            print("❌ Setup cancelled. Please start the required services and try again.")
             sys.exit(1)

     log.success("All required services are running!")
@@ -7,29 +7,28 @@ measuring key metrics like connection rate, event throughput, and time to first
 """

 import json
-import time
+import logging
+import os
+import random
+import statistics
 import sys
 import threading
-import os
-import logging
-import statistics
-from pathlib import Path
+import time
 from collections import deque
+from dataclasses import asdict, dataclass
 from datetime import datetime
-from dataclasses import dataclass, asdict
-from locust import HttpUser, task, between, events, constant
-from typing import TypedDict, Literal, TypeAlias
+from pathlib import Path
+from typing import Literal, TypeAlias, TypedDict

 import requests.exceptions
+from locust import HttpUser, between, constant, events, task

 # Add the stress-test directory to path to import common modules
 sys.path.insert(0, str(Path(__file__).parent))
 from common.config_helper import ConfigHelper  # type: ignore[import-not-found]

 # Configure logging
-logging.basicConfig(
-    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
-)
+logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
 logger = logging.getLogger(__name__)

 # Configuration from environment
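
The joined logging.basicConfig(...) line runs past 88 columns but stays under the limit the project presumably configures for ruff (a 120-column limit would fit every one-liner in this commit; that limit is an assumption, not stated in the diff). The pattern itself, as a standalone sketch:

    import logging

    # One call configures the root logger; the single line fits a
    # 120-column limit (assumed project setting).
    logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    logger = logging.getLogger(__name__)
    logger.info("stress test starting")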
@@ -54,6 +53,7 @@ ErrorType: TypeAlias = Literal[

 class ErrorCounts(TypedDict):
     """Error count tracking"""
+
     connection_error: int
     timeout: int
     invalid_json: int
@@ -65,6 +65,7 @@ class ErrorCounts(TypedDict):

 class SSEEvent(TypedDict):
     """Server-Sent Event structure"""
+
     data: str
     event: str
     id: str | None
@@ -72,11 +73,13 @@ class SSEEvent(TypedDict):

 class WorkflowInputs(TypedDict):
     """Workflow input structure"""
+
     question: str


 class WorkflowRequestData(TypedDict):
     """Workflow request payload"""
+
     inputs: WorkflowInputs
     response_mode: Literal["streaming"]
     user: str
@@ -84,6 +87,7 @@ class WorkflowRequestData(TypedDict):

 class ParsedEventData(TypedDict, total=False):
     """Parsed event data from SSE stream"""
+
     event: str
     task_id: str
     workflow_run_id: str
@@ -93,6 +97,7 @@ class ParsedEventData(TypedDict, total=False):

 class LocustStats(TypedDict):
     """Locust statistics structure"""
+
     total_requests: int
     total_failures: int
     avg_response_time: float
@@ -102,6 +107,7 @@ class LocustStats(TypedDict):

 class ReportData(TypedDict):
     """JSON report structure"""
+
     timestamp: str
     duration_seconds: float
     metrics: dict[str, object]  # Metrics as dict for JSON serialization
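
The only change to the six TypedDict classes above is a blank line inserted between each class docstring and its first field — apparently what ruff's preview formatting style enforces. The result looks like this sketch:

    from typing import TypedDict


    class ErrorCounts(TypedDict):
        """Error count tracking"""

        connection_error: int
        timeout: int


    # A TypedDict instance is a plain dict; the blank line is purely cosmetic.
    counts: ErrorCounts = {"connection_error": 0, "timeout": 1}
    print(counts["timeout"])  # 1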
@@ -154,7 +160,7 @@ class MetricsTracker:
         self.total_connections = 0
         self.total_events = 0
         self.start_time = time.time()
-
+
         # Enhanced metrics with memory limits
         self.max_samples = 10000  # Prevent unbounded growth
         self.ttfe_samples: deque[float] = deque(maxlen=self.max_samples)
@@ -233,9 +239,7 @@ class MetricsTracker:
             max_ttfe = max(self.ttfe_samples)
             p50_ttfe = statistics.median(self.ttfe_samples)
             if len(self.ttfe_samples) >= 2:
-                quantiles = statistics.quantiles(
-                    self.ttfe_samples, n=20, method="inclusive"
-                )
+                quantiles = statistics.quantiles(self.ttfe_samples, n=20, method="inclusive")
                 p95_ttfe = quantiles[18]  # 19th of 19 quantiles = 95th percentile
             else:
                 p95_ttfe = max_ttfe
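
For context on the reflowed call: statistics.quantiles(data, n=20) returns 19 cut points dividing the data into 20 equal groups, so index 18 is the 95th percentile — which is also why the code guards on having at least two samples. A worked sketch:

    import statistics

    samples = list(range(1, 101))  # 1..100
    cuts = statistics.quantiles(samples, n=20, method="inclusive")
    print(len(cuts))  # 19 cut points
    print(cuts[18])   # 95.05 -> the 95th percentile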
@@ -255,9 +259,7 @@ class MetricsTracker:
                 if durations
                 else 0
             )
-            events_per_stream_avg = (
-                statistics.mean(events_per_stream) if events_per_stream else 0
-            )
+            events_per_stream_avg = statistics.mean(events_per_stream) if events_per_stream else 0

             # Calculate inter-event latency statistics
             all_inter_event_times = []
@@ -268,32 +270,20 @@ class MetricsTracker:
                 inter_event_latency_avg = statistics.mean(all_inter_event_times)
                 inter_event_latency_p50 = statistics.median(all_inter_event_times)
                 inter_event_latency_p95 = (
-                    statistics.quantiles(
-                        all_inter_event_times, n=20, method="inclusive"
-                    )[18]
+                    statistics.quantiles(all_inter_event_times, n=20, method="inclusive")[18]
                     if len(all_inter_event_times) >= 2
                     else max(all_inter_event_times)
                 )
             else:
-                inter_event_latency_avg = inter_event_latency_p50 = (
-                    inter_event_latency_p95
-                ) = 0
+                inter_event_latency_avg = inter_event_latency_p50 = inter_event_latency_p95 = 0
         else:
-            stream_duration_avg = stream_duration_p50 = stream_duration_p95 = (
-                events_per_stream_avg
-            ) = 0
-            inter_event_latency_avg = inter_event_latency_p50 = (
-                inter_event_latency_p95
-            ) = 0
+            stream_duration_avg = stream_duration_p50 = stream_duration_p95 = events_per_stream_avg = 0
+            inter_event_latency_avg = inter_event_latency_p50 = inter_event_latency_p95 = 0

         # Also calculate overall average rates
         total_elapsed = current_time - self.start_time
-        overall_conn_rate = (
-            self.total_connections / total_elapsed if total_elapsed > 0 else 0
-        )
-        overall_event_rate = (
-            self.total_events / total_elapsed if total_elapsed > 0 else 0
-        )
+        overall_conn_rate = self.total_connections / total_elapsed if total_elapsed > 0 else 0
+        overall_event_rate = self.total_events / total_elapsed if total_elapsed > 0 else 0

         return MetricsSnapshot(
             active_connections=self.active_connections,
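
The collapsed fallbacks are behavior-preserving: a chained assignment like a = b = c = 0 evaluates the right-hand side once and binds every target to that same value, which is exactly what the parenthesized multi-line form did. A minimal sketch:

    # All three names end up bound to the same zero.
    inter_event_latency_avg = inter_event_latency_p50 = inter_event_latency_p95 = 0
    print(inter_event_latency_avg, inter_event_latency_p50, inter_event_latency_p95)  # 0 0 0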
@@ -389,7 +379,7 @@ class DifyWorkflowUser(HttpUser):

         # Load questions from file or use defaults
         if QUESTIONS_FILE and os.path.exists(QUESTIONS_FILE):
-            with open(QUESTIONS_FILE, "r") as f:
+            with open(QUESTIONS_FILE) as f:
                 self.questions = [line.strip() for line in f if line.strip()]
         else:
             self.questions = [
@@ -451,18 +441,13 @@ class DifyWorkflowUser(HttpUser):
         try:
             # Validate response
             if response.status_code >= 400:
-                error_type: ErrorType = (
-                    "http_4xx" if response.status_code < 500 else "http_5xx"
-                )
+                error_type: ErrorType = "http_4xx" if response.status_code < 500 else "http_5xx"
                 metrics.record_error(error_type)
                 response.failure(f"HTTP {response.status_code}")
                 return

             content_type = response.headers.get("Content-Type", "")
-            if (
-                "text/event-stream" not in content_type
-                and "application/json" not in content_type
-            ):
+            if "text/event-stream" not in content_type and "application/json" not in content_type:
                 logger.error(f"Expected text/event-stream, got: {content_type}")
                 metrics.record_error("invalid_response")
                 response.failure(f"Invalid content type: {content_type}")
@@ -473,10 +458,13 @@ class DifyWorkflowUser(HttpUser):

             for line in response.iter_lines(decode_unicode=True):
                 # Check if runner is stopping
-                if getattr(self.environment.runner, 'state', '') in ('stopping', 'stopped'):
+                if getattr(self.environment.runner, "state", "") in (
+                    "stopping",
+                    "stopped",
+                ):
                     logger.debug("Runner stopping, breaking streaming loop")
                     break
-
+
                 if line is not None:
                     bytes_received += len(line.encode("utf-8"))
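
Besides normalizing single quotes to double quotes, the formatter splits the membership test because the rewritten line exceeds the limit, placing one tuple element per line with a trailing comma. The guard itself is plain duck typing; a sketch with a hypothetical stand-in for locust's runner object:

    class FakeRunner:  # hypothetical; the real code reads self.environment.runner
        state = "stopping"


    runner = FakeRunner()
    if getattr(runner, "state", "") in (
        "stopping",
        "stopped",
    ):
        print("stop streaming")  # printed, since state == "stopping"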
@@ -489,9 +477,7 @@ class DifyWorkflowUser(HttpUser):

                     # Track inter-event timing
                     if last_event_time:
-                        inter_event_times.append(
-                            (current_time - last_event_time) * 1000
-                        )
+                        inter_event_times.append((current_time - last_event_time) * 1000)
                     last_event_time = current_time

                     if first_event_time is None:
@@ -512,15 +498,11 @@ class DifyWorkflowUser(HttpUser):
                             parsed_event: ParsedEventData = json.loads(event_data)
                             # Check for terminal events
                             if parsed_event.get("event") in TERMINAL_EVENTS:
-                                logger.debug(
-                                    f"Received terminal event: {parsed_event.get('event')}"
-                                )
+                                logger.debug(f"Received terminal event: {parsed_event.get('event')}")
                                 request_success = True
                                 break
                         except json.JSONDecodeError as e:
-                            logger.debug(
-                                f"JSON decode error: {e} for data: {event_data[:100]}"
-                            )
+                            logger.debug(f"JSON decode error: {e} for data: {event_data[:100]}")
                             metrics.record_error("invalid_json")

         except Exception as e:
@@ -583,16 +565,18 @@ def on_test_start(environment: object, **kwargs: object) -> None:

     # Periodic stats reporting
     def report_stats() -> None:
-        if not hasattr(environment, 'runner'):
+        if not hasattr(environment, "runner"):
             return
         runner = environment.runner
-        while hasattr(runner, 'state') and runner.state not in ["stopped", "stopping"]:
+        while hasattr(runner, "state") and runner.state not in ["stopped", "stopping"]:
             time.sleep(5)  # Report every 5 seconds
-            if hasattr(runner, 'state') and runner.state == "running":
+            if hasattr(runner, "state") and runner.state == "running":
                 stats = metrics.get_stats()

                 # Only log on master node in distributed mode
-                is_master = not getattr(environment.runner, "worker_id", None) if hasattr(environment, 'runner') else True
+                is_master = (
+                    not getattr(environment.runner, "worker_id", None) if hasattr(environment, "runner") else True
+                )
                 if is_master:
                     # Clear previous lines and show updated stats
                     logger.info("\n" + "=" * 80)
@@ -623,15 +607,15 @@ def on_test_start(environment: object, **kwargs: object) -> None:
                     logger.info(
                         f"{'(TTFE in ms)':<25} {stats.ttfe_avg:>15.1f} {stats.ttfe_p50:>10.1f} {stats.ttfe_p95:>10.1f} {stats.ttfe_min:>10.1f} {stats.ttfe_max:>10.1f}"
                     )
-                    logger.info(f"{'Window Samples':<25} {stats.ttfe_samples:>15,d} (last {min(10000, stats.ttfe_total_samples):,d} samples)")
+                    logger.info(
+                        f"{'Window Samples':<25} {stats.ttfe_samples:>15,d} (last {min(10000, stats.ttfe_total_samples):,d} samples)"
+                    )
                     logger.info(f"{'Total Samples':<25} {stats.ttfe_total_samples:>15,d}")

                     # Inter-event latency
                     if stats.inter_event_latency_avg > 0:
                         logger.info("-" * 80)
-                        logger.info(
-                            f"{'INTER-EVENT LATENCY':<25} {'AVG':>15} {'P50':>10} {'P95':>10}"
-                        )
+                        logger.info(f"{'INTER-EVENT LATENCY':<25} {'AVG':>15} {'P50':>10} {'P95':>10}")
                         logger.info(
                             f"{'(ms between events)':<25} {stats.inter_event_latency_avg:>15.1f} {stats.inter_event_latency_p50:>10.1f} {stats.inter_event_latency_p95:>10.1f}"
                         )
@@ -647,9 +631,9 @@ def on_test_start(environment: object, **kwargs: object) -> None:
                     logger.info("=" * 80)

                 # Show Locust stats summary
                 if hasattr(environment, "stats") and hasattr(environment.stats, "total"):
-                if hasattr(environment, 'stats') and hasattr(environment.stats, 'total'):
                     total = environment.stats.total
-                    if hasattr(total, 'num_requests') and total.num_requests > 0:
+                    if hasattr(total, "num_requests") and total.num_requests > 0:
                         logger.info(
                             f"{'LOCUST STATS':<25} {'Requests':>12} {'Fails':>8} {'Avg (ms)':>12} {'Min':>8} {'Max':>8}"
                         )
@@ -687,21 +671,15 @@ def on_test_stop(environment: object, **kwargs: object) -> None:
     logger.info("")
     logger.info("EVENTS")
     logger.info(f"  {'Total Events Received:':<30} {stats.total_events:>10,d}")
-    logger.info(
-        f"  {'Average Throughput:':<30} {stats.overall_event_rate:>10.2f} events/s"
-    )
-    logger.info(
-        f"  {'Final Rate (10s window):':<30} {stats.event_rate:>10.2f} events/s"
-    )
+    logger.info(f"  {'Average Throughput:':<30} {stats.overall_event_rate:>10.2f} events/s")
+    logger.info(f"  {'Final Rate (10s window):':<30} {stats.event_rate:>10.2f} events/s")

     logger.info("")
     logger.info("STREAM METRICS")
     logger.info(f"  {'Avg Stream Duration:':<30} {stats.stream_duration_avg:>10.1f} ms")
     logger.info(f"  {'P50 Stream Duration:':<30} {stats.stream_duration_p50:>10.1f} ms")
     logger.info(f"  {'P95 Stream Duration:':<30} {stats.stream_duration_p95:>10.1f} ms")
-    logger.info(
-        f"  {'Avg Events per Stream:':<30} {stats.events_per_stream_avg:>10.1f}"
-    )
+    logger.info(f"  {'Avg Events per Stream:':<30} {stats.events_per_stream_avg:>10.1f}")

     logger.info("")
     logger.info("INTER-EVENT LATENCY")
@@ -716,7 +694,9 @@ def on_test_stop(environment: object, **kwargs: object) -> None:
     logger.info(f"  {'95th Percentile:':<30} {stats.ttfe_p95:>10.1f} ms")
     logger.info(f"  {'Minimum:':<30} {stats.ttfe_min:>10.1f} ms")
     logger.info(f"  {'Maximum:':<30} {stats.ttfe_max:>10.1f} ms")
-    logger.info(f"  {'Window Samples:':<30} {stats.ttfe_samples:>10,d} (last {min(10000, stats.ttfe_total_samples):,d})")
+    logger.info(
+        f"  {'Window Samples:':<30} {stats.ttfe_samples:>10,d} (last {min(10000, stats.ttfe_total_samples):,d})"
+    )
     logger.info(f"  {'Total Samples:':<30} {stats.ttfe_total_samples:>10,d}")

     # Error summary
@@ -730,7 +710,7 @@ def on_test_stop(environment: object, **kwargs: object) -> None:
     logger.info("=" * 80 + "\n")

     # Export machine-readable report (only on master node)
-    is_master = not getattr(environment.runner, 'worker_id', None) if hasattr(environment, 'runner') else True
+    is_master = not getattr(environment.runner, "worker_id", None) if hasattr(environment, "runner") else True
     if is_master:
         export_json_report(stats, test_duration, environment)
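
The is_master expression leans on locust's distributed mode: worker runners expose a worker_id attribute, so its absence (or running with no runner at all) marks the process as master. getattr with a None default keeps the probe safe on any object. A sketch with hypothetical stand-ins:

    class WorkerStandIn:  # hypothetical model of a locust worker runner
        worker_id = "worker-1"


    class MasterStandIn:  # no worker_id attribute
        pass


    for runner in (WorkerStandIn(), MasterStandIn()):
        is_master = not getattr(runner, "worker_id", None)
        print(type(runner).__name__, is_master)  # WorkerStandIn False / MasterStandIn True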
@@ -746,9 +726,9 @@ def export_json_report(stats: MetricsSnapshot, duration: float, environment: obj

     # Access environment.stats.total attributes safely
     locust_stats: LocustStats | None = None
-    if hasattr(environment, 'stats') and hasattr(environment.stats, 'total'):
+    if hasattr(environment, "stats") and hasattr(environment.stats, "total"):
         total = environment.stats.total
-        if hasattr(total, 'num_requests') and total.num_requests > 0:
+        if hasattr(total, "num_requests") and total.num_requests > 0:
             locust_stats = LocustStats(
                 total_requests=total.num_requests,
                 total_failures=total.num_failures,