remove unused dropStrategy config setting for event queues

This commit is contained in:
Göran Sander
2025-11-17 22:55:13 +01:00
parent e536f0e18f
commit 7e03cefbe1
8 changed files with 9 additions and 32 deletions

View File

@@ -55,8 +55,7 @@ Butler-SOS:
   # Message queue settings
   messageQueue:
     maxConcurrent: 10 # Max concurrent message processing
-    maxSize: 200 # Max queue size before dropping
-    dropStrategy: oldest # Drop 'oldest' or 'newest'
+    maxSize: 200 # Max queue size before rejecting
     backpressureThreshold: 80 # Warn at this % utilization

   # Rate limiting (optional)
@@ -91,7 +90,6 @@ Butler-SOS:
   messageQueue:
     maxConcurrent: 10
     maxSize: 200
-    dropStrategy: oldest
     backpressureThreshold: 80

   rateLimit:
@@ -114,11 +112,7 @@ Butler-SOS:
 - **maxConcurrent** (default: 10): Number of messages processed simultaneously. Higher values = more throughput but more CPU/memory usage. Recommended: 5-20 depending on server capacity.
-- **maxSize** (default: 200): Maximum queue size. When exceeded, messages are dropped. Higher values provide more buffer during spikes but use more memory. Recommended: 100-500.
-- **dropStrategy** (default: 'oldest'): Which messages to drop when queue is full:
-    - `oldest`: Drop oldest queued messages (FIFO) - keeps most recent data
-    - `newest`: Drop newest incoming messages - preserves historical sequence
+- **maxSize** (default: 200): Maximum queue size. When exceeded, new messages are rejected and dropped. Higher values provide more buffer during spikes but use more memory. Recommended: 100-500. Note: Queue size only counts pending messages (not currently processing), so total capacity is maxSize + maxConcurrent.
 - **backpressureThreshold** (default: 80): Queue utilization percentage that triggers warnings. Recommended: 70-90%.
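To make the capacity and threshold notes above concrete, here is a small sketch using the default values. It is illustrative only; the exact point at which Butler-SOS emits the backpressure warning (and whether it is computed against maxSize) is an assumption, not taken from this diff.

// Illustrative only: defaults from the documentation above.
const messageQueue = {
    maxConcurrent: 10, // messages being processed right now
    maxSize: 200, // pending messages waiting in the queue
    backpressureThreshold: 80, // warn at this % queue utilization
};

// Queue size only counts pending messages, so the total number of
// messages held at once is maxSize + maxConcurrent:
const totalInFlight = messageQueue.maxSize + messageQueue.maxConcurrent; // 210

// Assuming utilization is measured against maxSize, warnings would start
// once pending messages reach roughly this count:
const warnAt = (messageQueue.backpressureThreshold / 100) * messageQueue.maxSize; // 160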
@@ -173,7 +167,6 @@ Butler-SOS:
   messageQueue:
     maxConcurrent: 10
     maxSize: 200
-    dropStrategy: oldest
     backpressureThreshold: 80
   rateLimit:
     enable: false

View File

@@ -124,7 +124,6 @@ Butler-SOS:
   messageQueue:
     maxConcurrent: 10 # Max number of messages being processed simultaneously (default: 10)
     maxSize: 200 # Max queue size before messages are dropped (default: 200)
-    dropStrategy: oldest # Drop 'oldest' or 'newest' messages when queue is full (default: oldest)
     backpressureThreshold: 80 # Warn when queue utilization reaches this % (default: 80)
   # Rate limiting to prevent message flooding
   rateLimit:
@@ -181,7 +180,6 @@ Butler-SOS:
   messageQueue:
     maxConcurrent: 10 # Max number of messages being processed simultaneously (default: 10)
     maxSize: 200 # Max queue size before messages are dropped (default: 200)
-    dropStrategy: oldest # Drop 'oldest' or 'newest' messages when queue is full (default: oldest)
     backpressureThreshold: 80 # Warn when queue utilization reaches this % (default: 80)
   # Rate limiting to prevent message flooding
   rateLimit:

View File

@@ -93,7 +93,6 @@ describe('config-file-schema', () => {
     messageQueue: {
         maxConcurrent: 10,
         maxSize: 200,
-        dropStrategy: 'oldest',
         backpressureThreshold: 80,
     },
     rateLimit: {
@@ -128,7 +127,6 @@ describe('config-file-schema', () => {
     messageQueue: {
         maxConcurrent: 10,
         maxSize: 200,
-        dropStrategy: 'oldest',
         backpressureThreshold: 80,
     },
     rateLimit: {

View File

@@ -64,7 +64,6 @@ describe('UdpQueueManager', () => {
     messageQueue: {
         maxConcurrent: 5,
         maxSize: 10,
-        dropStrategy: 'oldest',
         backpressureThreshold: 80,
     },
     rateLimit: {

View File

@@ -34,12 +34,7 @@ export const logEventsSchema = {
         },
         backpressureThreshold: { type: 'number', default: 80 },
     },
-    required: [
-        'maxConcurrent',
-        'maxSize',
-        'dropStrategy',
-        'backpressureThreshold',
-    ],
+    required: ['maxConcurrent', 'maxSize', 'backpressureThreshold'],
     additionalProperties: false,
 },
 rateLimit: {

View File

@@ -46,12 +46,7 @@ export const userEventsSchema = {
         },
         backpressureThreshold: { type: 'number', default: 80 },
     },
-    required: [
-        'maxConcurrent',
-        'maxSize',
-        'dropStrategy',
-        'backpressureThreshold',
-    ],
+    required: ['maxConcurrent', 'maxSize', 'backpressureThreshold'],
     additionalProperties: false,
 },
 rateLimit: {
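Both event schemas keep additionalProperties: false, so a config file that still contains dropStrategy will now fail schema validation rather than being silently accepted. A minimal sketch of that behaviour, assuming Ajv-style JSON Schema validation; the schema below is a hypothetical trim-down to just the messageQueue block, and the actual validator wiring in Butler-SOS is not part of this diff.

import Ajv from 'ajv';

// Trimmed-down messageQueue schema mirroring the change above (illustrative).
const messageQueueSchema = {
    type: 'object',
    properties: {
        maxConcurrent: { type: 'number', default: 10 },
        maxSize: { type: 'number', default: 200 },
        backpressureThreshold: { type: 'number', default: 80 },
    },
    required: ['maxConcurrent', 'maxSize', 'backpressureThreshold'],
    additionalProperties: false,
};

const ajv = new Ajv({ useDefaults: true });
const validate = ajv.compile(messageQueueSchema);

// A config that still carries the removed setting is rejected,
// because additionalProperties is false:
const oldConfig = { maxConcurrent: 10, maxSize: 200, dropStrategy: 'oldest', backpressureThreshold: 80 };
console.log(validate(oldConfig)); // false
console.log(validate.errors?.[0]?.message); // "must NOT have additional properties"

// The same config without dropStrategy passes:
const newConfig = { maxConcurrent: 10, maxSize: 200, backpressureThreshold: 80 };
console.log(validate(newConfig)); // true

In practice this means existing config files should have their dropStrategy lines removed when upgrading.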

View File

@@ -2019,7 +2019,7 @@ export async function postUserEventQueueMetricsToInfluxdb() {
         }
         try {
-            globals.influx.writePoints([point]);
+            await globals.influx.writePoints([point]);
             globals.logger.verbose(
                 'USER EVENT QUEUE METRICS INFLUXDB: Sent queue metrics data to InfluxDB v1'
             );
@@ -2172,7 +2172,7 @@ export async function postLogEventQueueMetricsToInfluxdb() {
         }
         try {
-            globals.influx.writePoints([point]);
+            await globals.influx.writePoints([point]);
             globals.logger.verbose(
                 'LOG EVENT QUEUE METRICS INFLUXDB: Sent queue metrics data to InfluxDB v1'
             );
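The added await matters because writePoints() on the InfluxDB v1 client returns a promise: without awaiting it, a failed write would reject outside the surrounding try/catch and surface as an unhandled promise rejection instead of being caught and logged. A minimal sketch of the pattern; the function name and error message below are simplified placeholders, not the exact Butler-SOS code.

// Sketch only: dependencies are passed in so the example is self-contained.
async function writeQueueMetrics(influx, logger, point) {
    try {
        // Awaiting keeps a rejected write inside this try/catch.
        await influx.writePoints([point]);
        logger.verbose('QUEUE METRICS INFLUXDB: Sent queue metrics data to InfluxDB v1');
    } catch (err) {
        // Without the await above, this branch would never see a write failure.
        logger.error(`QUEUE METRICS INFLUXDB: Error writing queue metrics. ${err}`);
    }
}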

View File

@@ -189,10 +189,9 @@ export class UdpQueueManager {
     *
     * @param {object} config - Configuration object
     * @param {object} config.messageQueue - Queue configuration
-    * @param {number} config.messageQueue.maxConcurrent - Max concurrent message processing
-    * @param {number} config.messageQueue.maxSize - Max queue size
-    * @param {string} config.messageQueue.dropStrategy - Drop strategy ('oldest' or 'newest')
-    * @param {number} config.messageQueue.backpressureThreshold - Backpressure warning threshold (%)
+    * @param {number} config.messageQueue.maxConcurrent - Maximum concurrent operations
+    * @param {number} config.messageQueue.maxSize - Maximum queue size
+    * @param {number} config.messageQueue.backpressureThreshold - Backpressure threshold percentage
     * @param {object} config.rateLimit - Rate limit configuration
     * @param {boolean} config.rateLimit.enable - Enable rate limiting
     * @param {number} config.rateLimit.maxMessagesPerMinute - Max messages per minute
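After this change the config object documented above no longer carries dropStrategy; a full queue simply rejects new messages. A sketch of the trimmed shape, mirroring the UdpQueueManager test fixture earlier in this commit. Whether this JSDoc describes the constructor, whether further arguments (a logger, for example) are required, and the maxMessagesPerMinute value are assumptions, not taken from the diff.

// Hypothetical usage sketch, not the exact Butler-SOS code.
const queueConfig = {
    messageQueue: {
        maxConcurrent: 10, // maximum concurrent operations
        maxSize: 200, // maximum queue size (pending messages only)
        backpressureThreshold: 80, // warn at this % utilization
    },
    rateLimit: {
        enable: false, // enable rate limiting
        maxMessagesPerMinute: 600, // illustrative value, not from this diff
    },
};

// Assumes the documented config object is the sole constructor argument.
const queueManager = new UdpQueueManager(queueConfig);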