refactor: Better support for InfluxDB v2 databases

This commit is contained in:
Göran Sander
2025-12-16 11:25:21 +01:00
parent d05c0bb653
commit b84d99cd4a
31 changed files with 2972 additions and 3203 deletions

View File

@@ -502,11 +502,6 @@ Butler-SOS:
# Influx db config parameters
influxdbConfig:
enable: true
# Feature flag to enable refactored InfluxDB code (recommended for better maintainability)
# Set to true to use the new modular implementation, false for legacy code
# Note: v3 always uses refactored code (legacy v3 code has been removed)
# This flag only affects v1 and v2 implementations
useRefactoredCode: true
# Items below are mandatory if influxdbConfig.enable=true
host: influxdb.mycompany.com # InfluxDB host, hostname, FQDN or IP address
port: 8086 # Port where InfluxDB is listening, usually 8086

View File

@@ -35,10 +35,10 @@ jest.unstable_mockModule('../../globals.js', () => ({
}));
const globals = (await import('../../globals.js')).default;
jest.unstable_mockModule('../post-to-influxdb.js', () => ({
jest.unstable_mockModule('../influxdb/index.js', () => ({
postHealthMetricsToInfluxdb: jest.fn(),
}));
const { postHealthMetricsToInfluxdb } = await import('../post-to-influxdb.js');
const { postHealthMetricsToInfluxdb } = await import('../influxdb/index.js');
jest.unstable_mockModule('../post-to-new-relic.js', () => ({
postHealthMetricsToNewRelic: jest.fn(),

View File

@@ -1,503 +0,0 @@
// Unit tests for the legacy (pre-refactor) lib/post-to-influxdb.js implementation.
// NOTE: all jest.unstable_mockModule() registrations MUST run before the dynamic
// import() of any module that depends on them — do not reorder these statements.
import { jest, describe, test, expect, beforeEach, afterEach } from '@jest/globals';
// Mock the InfluxDB v2 client
jest.unstable_mockModule('@influxdata/influxdb-client', () => ({
    Point: jest.fn().mockImplementation(() => ({
        tag: jest.fn().mockReturnThis(),
        floatField: jest.fn().mockReturnThis(),
        intField: jest.fn().mockReturnThis(),
        stringField: jest.fn().mockReturnThis(),
        uintField: jest.fn().mockReturnThis(),
        booleanField: jest.fn().mockReturnThis(), // needed by code paths that write boolean fields
        timestamp: jest.fn().mockReturnThis(),
    })),
}));
// Mock the InfluxDB v3 client
jest.unstable_mockModule('@influxdata/influxdb3-client', () => ({
    Point: jest.fn().mockImplementation(() => ({
        setTag: jest.fn().mockReturnThis(),
        setFloatField: jest.fn().mockReturnThis(),
        setIntegerField: jest.fn().mockReturnThis(),
        setStringField: jest.fn().mockReturnThis(),
        setBooleanField: jest.fn().mockReturnThis(),
        timestamp: jest.fn().mockReturnThis(),
        toLineProtocol: jest.fn().mockReturnValue('mock-line-protocol'),
    })),
}));
// Mock globals — minimal logger/config/influxDB surface used by the module under test
jest.unstable_mockModule('../../globals.js', () => ({
    default: {
        logger: {
            info: jest.fn(),
            verbose: jest.fn(),
            debug: jest.fn(),
            error: jest.fn(),
            warn: jest.fn(),
            silly: jest.fn(),
        },
        config: {
            get: jest.fn(),
            has: jest.fn(),
        },
        influxDB: {
            writeApi: {
                writePoint: jest.fn(),
                flush: jest.fn().mockResolvedValue(),
            },
        },
        appNames: [],
        getErrorMessage: jest.fn().mockImplementation((err) => err.toString()),
    },
}));
describe('post-to-influxdb', () => {
    let influxdb;
    let globals;
    let Point;
    beforeEach(async () => {
        jest.clearAllMocks();
        // Get mocked modules
        const influxdbClient = await import('@influxdata/influxdb-client');
        Point = influxdbClient.Point;
        globals = (await import('../../globals.js')).default;
        // Mock globals.influx for InfluxDB v1 tests
        globals.influx = { writePoints: jest.fn() };
        // Import the module under test
        influxdb = await import('../post-to-influxdb.js');
    });
    // Event-count storage: writes aggregated UDP event counters to InfluxDB
    describe('storeEventCountInfluxDB', () => {
        test('should not store events if no log events exist', async () => {
            // Setup
            globals.udpEvents = {
                getLogEvents: jest.fn().mockResolvedValue([]),
                getUserEvents: jest.fn().mockResolvedValue([]),
            };
            // Execute
            await influxdb.storeEventCountInfluxDB();
            // Verify
            expect(globals.logger.verbose).toHaveBeenCalledWith(
                expect.stringContaining('EVENT COUNT INFLUXDB: No events to store in InfluxDB')
            );
            expect(globals.influxDB.writeApi.writePoint).not.toHaveBeenCalled();
            expect(globals.influxDB.writeApi.flush).not.toHaveBeenCalled();
        });
        test('should store log events to InfluxDB (InfluxDB v2)', async () => {
            // Setup
            globals.config.get = jest.fn((key) => {
                if (key === 'Butler-SOS.influxdbConfig.version') return 2;
                if (key === 'Butler-SOS.qlikSenseEvents.eventCount.influxdb.measurementName') {
                    return 'events_log';
                }
                if (key === 'Butler-SOS.influxdbConfig.v2Config.org') return 'test-org';
                if (key === 'Butler-SOS.influxdbConfig.v2Config.bucket') return 'test-bucket';
                return undefined;
            });
            const mockLogEvents = [
                {
                    source: 'test-source',
                    host: 'test-host',
                    subsystem: 'test-subsystem',
                    counter: 5,
                },
            ];
            globals.udpEvents = {
                getLogEvents: jest.fn().mockResolvedValue(mockLogEvents),
                getUserEvents: jest.fn().mockResolvedValue([]),
            };
            // Mock v2 writeApi
            globals.influx.getWriteApi = jest.fn().mockReturnValue({
                writePoints: jest.fn(),
            });
            // Execute
            await influxdb.storeEventCountInfluxDB();
            // Verify
            expect(globals.influx.getWriteApi).toHaveBeenCalled();
            // The writeApi mock's writePoints should be called
            const writeApi = globals.influx.getWriteApi.mock.results[0].value;
            expect(writeApi.writePoints).toHaveBeenCalled();
            expect(globals.logger.verbose).toHaveBeenCalledWith(
                expect.stringContaining(
                    'EVENT COUNT INFLUXDB: Sent Butler SOS event count data to InfluxDB'
                )
            );
        });
        test('should store user events to InfluxDB (InfluxDB v2)', async () => {
            // Setup
            globals.config.get = jest.fn((key) => {
                if (key === 'Butler-SOS.influxdbConfig.version') return 2;
                if (key === 'Butler-SOS.qlikSenseEvents.eventCount.influxdb.measurementName') {
                    return 'events_user';
                }
                if (key === 'Butler-SOS.influxdbConfig.v2Config.org') return 'test-org';
                if (key === 'Butler-SOS.influxdbConfig.v2Config.bucket') return 'test-bucket';
                return undefined;
            });
            const mockUserEvents = [
                {
                    source: 'test-source',
                    host: 'test-host',
                    subsystem: 'test-subsystem',
                    counter: 3,
                },
            ];
            globals.udpEvents = {
                getLogEvents: jest.fn().mockResolvedValue([]),
                getUserEvents: jest.fn().mockResolvedValue(mockUserEvents),
            };
            // Mock v2 writeApi
            globals.influx.getWriteApi = jest.fn().mockReturnValue({
                writePoints: jest.fn(),
            });
            // Execute
            await influxdb.storeEventCountInfluxDB();
            // Verify
            expect(globals.influx.getWriteApi).toHaveBeenCalled();
            // The writeApi mock's writePoints should be called
            const writeApi = globals.influx.getWriteApi.mock.results[0].value;
            expect(writeApi.writePoints).toHaveBeenCalled();
            expect(globals.logger.verbose).toHaveBeenCalledWith(
                expect.stringContaining(
                    'EVENT COUNT INFLUXDB: Sent Butler SOS event count data to InfluxDB'
                )
            );
        });
        test('should handle errors gracefully (InfluxDB v2)', async () => {
            // Setup
            globals.config.get = jest.fn((key) => {
                if (key === 'Butler-SOS.influxdbConfig.version') return 2;
                if (key === 'Butler-SOS.qlikSenseEvents.eventCount.influxdb.measurementName') {
                    return 'events_log';
                }
                if (key === 'Butler-SOS.influxdbConfig.v2Config.org') return 'test-org';
                if (key === 'Butler-SOS.influxdbConfig.v2Config.bucket') return 'test-bucket';
                return undefined;
            });
            // Provide at least one event so writePoints is called
            globals.udpEvents = {
                getLogEvents: jest.fn().mockResolvedValue([{}]),
                getUserEvents: jest.fn().mockResolvedValue([]),
            };
            // Mock v2 writeApi to throw error on writePoints
            globals.influx.getWriteApi = jest.fn().mockReturnValue({
                writePoints: jest.fn(() => {
                    throw new Error('Test error');
                }),
            });
            // Execute
            await influxdb.storeEventCountInfluxDB();
            // Verify
            // NOTE(review): the production message says "health data" in the event-count
            // path — this looks like a copy/paste of the health-metrics error message;
            // confirm against post-to-influxdb.js before changing either side.
            expect(globals.logger.error).toHaveBeenCalledWith(
                expect.stringContaining(
                    'EVENT COUNT INFLUXDB: Error saving health data to InfluxDB v2! Error: Test error'
                )
            );
        });
    });
    // Rejected-event-count storage: counts of events dropped by filtering rules
    describe('storeRejectedEventCountInfluxDB', () => {
        test('should not store events if no rejected events exist', async () => {
            // Setup
            globals.rejectedEvents = {
                getRejectedLogEvents: jest.fn().mockResolvedValue([]),
            };
            // Execute
            await influxdb.storeRejectedEventCountInfluxDB();
            // Verify
            expect(globals.logger.verbose).toHaveBeenCalledWith(
                expect.stringContaining(
                    'REJECTED EVENT COUNT INFLUXDB: No events to store in InfluxDB'
                )
            );
            expect(globals.influxDB.writeApi.writePoint).not.toHaveBeenCalled();
        });
        test('should store rejected events to InfluxDB (InfluxDB v2)', async () => {
            // Setup
            globals.config.get = jest.fn((key) => {
                if (key === 'Butler-SOS.influxdbConfig.version') return 2;
                if (key === 'Butler-SOS.influxdbConfig.v2Config.org') return 'test-org';
                if (key === 'Butler-SOS.influxdbConfig.v2Config.bucket') return 'test-bucket';
                if (
                    key === 'Butler-SOS.qlikSenseEvents.rejectedEventCount.influxdb.measurementName'
                )
                    return 'events_rejected';
                return undefined;
            });
            const mockRejectedEvents = [
                {
                    source: 'test-source',
                    counter: 7,
                },
            ];
            globals.rejectedEvents = {
                getRejectedLogEvents: jest.fn().mockResolvedValue(mockRejectedEvents),
            };
            // Mock v2 getWriteApi
            const writeApiMock = { writePoints: jest.fn() };
            globals.influx.getWriteApi = jest.fn().mockReturnValue(writeApiMock);
            // Execute
            await influxdb.storeRejectedEventCountInfluxDB();
            // Verify
            expect(Point).toHaveBeenCalledWith('events_rejected');
            expect(globals.influx.getWriteApi).toHaveBeenCalled();
            expect(writeApiMock.writePoints).toHaveBeenCalled();
            // NOTE(review): prefix here is "REJECT LOG EVENT" but the error path below
            // uses "REJECTED LOG EVENT" — mirrors an inconsistency in the production
            // log messages; worth unifying in the source module.
            expect(globals.logger.verbose).toHaveBeenCalledWith(
                expect.stringContaining(
                    'REJECT LOG EVENT INFLUXDB: Sent Butler SOS rejected event count data to InfluxDB'
                )
            );
        });
        test('should handle errors gracefully (InfluxDB v2)', async () => {
            // Setup
            globals.config.get = jest.fn((key) => {
                if (key === 'Butler-SOS.influxdbConfig.version') return 2;
                if (key === 'Butler-SOS.influxdbConfig.v2Config.org') return 'test-org';
                if (key === 'Butler-SOS.influxdbConfig.v2Config.bucket') return 'test-bucket';
                return undefined;
            });
            const mockRejectedEvents = [
                {
                    source: 'test-source',
                    counter: 7,
                },
            ];
            globals.rejectedEvents = {
                getRejectedLogEvents: jest.fn().mockResolvedValue(mockRejectedEvents),
            };
            // Mock v2 getWriteApi and writePoints to throw
            const writeApiMock = {
                writePoints: jest.fn(() => {
                    throw new Error('Test error');
                }),
            };
            globals.influx.getWriteApi = jest.fn().mockReturnValue(writeApiMock);
            // Execute
            await influxdb.storeRejectedEventCountInfluxDB();
            // Verify
            expect(globals.logger.error).toHaveBeenCalledWith(
                expect.stringContaining(
                    'REJECTED LOG EVENT INFLUXDB: Error saving data to InfluxDB v2! Error: Test error'
                )
            );
        });
    });
    // Version routing: verifies the v2 code path is selected from config.
    // This nested block re-imports and shadows the outer `influxdb`/`globals`
    // so it controls its own globals.influxWriteApi fixture.
    describe('globals.config.get("Butler-SOS.influxdbConfig.version")', () => {
        let influxdb;
        let globals;
        beforeEach(async () => {
            jest.clearAllMocks();
            influxdb = await import('../post-to-influxdb.js');
            globals = (await import('../../globals.js')).default;
            globals.influx = { writePoints: jest.fn() };
            globals.influxWriteApi = [
                { serverName: 'test-server', writeAPI: { writePoints: jest.fn() } },
            ];
        });
        test('should use InfluxDB v2 path when version is 2', async () => {
            globals.config.get = jest.fn((key) => {
                if (key === 'Butler-SOS.influxdbConfig.version') return 2;
                if (key === 'Butler-SOS.influxdbConfig.includeFields.activeDocs') return false;
                if (key === 'Butler-SOS.influxdbConfig.includeFields.loadedDocs') return false;
                if (key === 'Butler-SOS.influxdbConfig.includeFields.inMemoryDocs') return false;
                if (key === 'Butler-SOS.appNames.enableAppNameExtract') return false;
                return undefined;
            });
            const serverName = 'test-server';
            const host = 'test-host';
            const serverTags = { server_name: serverName };
            const healthBody = {
                started: '20220801T121212.000Z',
                apps: { active_docs: [], loaded_docs: [], in_memory_docs: [] },
                cache: { added: 0, hits: 0, lookups: 0, replaced: 0, bytes_added: 0 },
                cpu: { total: 0 },
                mem: { committed: 0, allocated: 0, free: 0 },
                session: { active: 0, total: 0 },
                users: { active: 0, total: 0 },
            };
            await influxdb.postHealthMetricsToInfluxdb(serverName, host, healthBody, serverTags);
            expect(globals.config.get).toHaveBeenCalledWith('Butler-SOS.influxdbConfig.version');
            expect(globals.influxWriteApi[0].writeAPI.writePoints).toHaveBeenCalled();
        });
    });
    // Uptime formatter: "<days> days, <h>h <mm>m <ss>s" for valid input, '' otherwise
    describe('getFormattedTime', () => {
        test('should return valid formatted time for valid Date string', () => {
            const validDate = '20230615T143022';
            const result = influxdb.getFormattedTime(validDate);
            expect(result).toBeDefined();
            expect(typeof result).toBe('string');
            expect(result).toMatch(/^\d+ days, \d{1,2}h \d{2}m \d{2}s$/);
        });
        test('should return empty string for invalid Date string', () => {
            const invalidDate = 'invalid-date';
            const result = influxdb.getFormattedTime(invalidDate);
            expect(result).toBe('');
        });
        test('should return empty string for undefined input', () => {
            const result = influxdb.getFormattedTime(undefined);
            expect(result).toBe('');
        });
        test('should return empty string for null input', () => {
            const result = influxdb.getFormattedTime(null);
            expect(result).toBe('');
        });
    });
    describe('postHealthMetricsToInfluxdb', () => {
        test('should post health metrics to InfluxDB v2', async () => {
            globals.config.get = jest.fn((key) => {
                if (key === 'Butler-SOS.influxdbConfig.version') return 2;
                if (key === 'Butler-SOS.influxdbConfig.includeFields.activeDocs') return false;
                if (key === 'Butler-SOS.influxdbConfig.includeFields.loadedDocs') return false;
                if (key === 'Butler-SOS.influxdbConfig.includeFields.inMemoryDocs') return false;
                if (key === 'Butler-SOS.appNames.enableAppNameExtract') return false;
                return undefined;
            });
            // Per-server write APIs are looked up by serverName in the module under test
            globals.influxWriteApi = [
                { serverName: 'test-server', writeAPI: { writePoints: jest.fn() } },
            ];
            const serverName = 'test-server';
            const host = 'test-host';
            const serverTags = { server_name: serverName };
            const healthBody = {
                started: '20220801T121212.000Z',
                apps: { active_docs: [], loaded_docs: [], in_memory_docs: [] },
                cache: { added: 0, hits: 0, lookups: 0, replaced: 0, bytes_added: 0 },
                cpu: { total: 0 },
                mem: { committed: 0, allocated: 0, free: 0 },
                session: { active: 0, total: 0 },
                users: { active: 0, total: 0 },
            };
            await influxdb.postHealthMetricsToInfluxdb(serverName, host, healthBody, serverTags);
            expect(globals.influxWriteApi[0].writeAPI.writePoints).toHaveBeenCalled();
        });
    });
    describe('postProxySessionsToInfluxdb', () => {
        test('should post proxy sessions to InfluxDB v2', async () => {
            globals.config.get = jest.fn((key) => {
                if (key === 'Butler-SOS.influxdbConfig.version') return 2;
                if (key === 'Butler-SOS.influxdbConfig.instanceTag') return 'DEV';
                if (key === 'Butler-SOS.userSessions.influxdb.measurementName')
                    return 'user_sessions';
                return undefined;
            });
            globals.config.has = jest.fn().mockReturnValue(true);
            // Mock the writeAPI object that will be found via find()
            const mockWriteAPI = { writePoints: jest.fn() };
            globals.influxWriteApi = [{ serverName: 'test-server', writeAPI: mockWriteAPI }];
            const mockUserSessions = {
                serverName: 'test-server',
                host: 'test-host',
                virtualProxy: 'test-proxy',
                datapointInfluxdb: [
                    {
                        measurement: 'user_sessions',
                        tags: { host: 'test-host' },
                        fields: { count: 1 },
                    },
                ],
                sessionCount: 1,
                uniqueUserList: 'user1',
            };
            await influxdb.postProxySessionsToInfluxdb(mockUserSessions);
            expect(mockWriteAPI.writePoints).toHaveBeenCalled();
            expect(globals.logger.verbose).toHaveBeenCalledWith(
                'PROXY SESSIONS: Sent user session data to InfluxDB for server "test-host", virtual proxy "test-proxy"'
            );
        });
    });
    describe('postButlerSOSMemoryUsageToInfluxdb', () => {
        test('should post memory usage to InfluxDB v2', async () => {
            globals.config.get = jest.fn((key) => {
                if (key === 'Butler-SOS.influxdbConfig.version') return 2;
                if (key === 'Butler-SOS.influxdbConfig.instanceTag') return 'DEV';
                if (key === 'Butler-SOS.heartbeat.influxdb.measurementName')
                    return 'butlersos_memory_usage';
                if (key === 'Butler-SOS.influxdbConfig.v2Config.org') return 'test-org';
                if (key === 'Butler-SOS.influxdbConfig.v2Config.bucket') return 'test-bucket';
                return undefined;
            });
            globals.config.has = jest.fn().mockReturnValue(true);
            // Mock the writeAPI returned by getWriteApi()
            const mockWriteApi = { writePoint: jest.fn() };
            globals.influx.getWriteApi = jest.fn().mockReturnValue(mockWriteApi);
            const mockMemory = {
                instanceTag: 'DEV',
                heapUsedMByte: 50,
                heapTotalMByte: 100,
                externalMemoryMByte: 5,
                processMemoryMByte: 200,
            };
            await influxdb.postButlerSOSMemoryUsageToInfluxdb(mockMemory);
            // 'ns' = nanosecond write precision
            expect(globals.influx.getWriteApi).toHaveBeenCalledWith(
                'test-org',
                'test-bucket',
                'ns',
                expect.any(Object)
            );
            expect(mockWriteApi.writePoint).toHaveBeenCalled();
            expect(globals.logger.verbose).toHaveBeenCalledWith(
                'MEMORY USAGE INFLUXDB: Sent Butler SOS memory usage data to InfluxDB'
            );
        });
    });
    // TODO(review): empty placeholder suite — either add tests or remove
    describe('postUserEventToInfluxdb', () => {});
    describe('postLogEventToInfluxdb', () => {
        test('should handle errors gracefully', async () => {
            globals.config.get = jest.fn().mockImplementation(() => {
                throw new Error('Test error');
            });
            const mockMsg = { message: 'Test log event' };
            await influxdb.postLogEventToInfluxdb(mockMsg);
            expect(globals.logger.error).toHaveBeenCalledWith(
                'LOG EVENT INFLUXDB 2: Error saving log event to InfluxDB! Error: Test error'
            );
        });
    });
});

View File

@@ -138,7 +138,7 @@ describe('proxysessionmetrics', () => {
// Get mocked modules
axios = (await import('axios')).default;
globals = (await import('../../globals.js')).default;
influxdb = await import('../post-to-influxdb.js');
influxdb = await import('../influxdb/index.js');
newRelic = await import('../post-to-new-relic.js');
mqtt = await import('../post-to-mqtt.js');
servertags = await import('../servertags.js');

View File

@@ -18,7 +18,7 @@ jest.unstable_mockModule('../../globals.js', () => ({
}));
// Mock other dependencies
jest.unstable_mockModule('../post-to-influxdb.js', () => ({
jest.unstable_mockModule('../influxdb/index.js', () => ({
postButlerSOSMemoryUsageToInfluxdb: jest.fn(),
}));
@@ -58,7 +58,7 @@ process.memoryUsage = jest.fn().mockReturnValue({
// Load mocked dependencies
const globals = (await import('../../globals.js')).default;
const { postButlerSOSMemoryUsageToInfluxdb } = await import('../post-to-influxdb.js');
const { postButlerSOSMemoryUsageToInfluxdb } = await import('../influxdb/index.js');
const { postButlerSOSUptimeToNewRelic } = await import('../post-to-new-relic.js');
const later = (await import('@breejs/later')).default;

View File

@@ -27,7 +27,7 @@ jest.unstable_mockModule('../../globals.js', () => ({
},
}));
jest.unstable_mockModule('../post-to-influxdb.js', () => ({
jest.unstable_mockModule('../influxdb/index.js', () => ({
storeRejectedEventCountInfluxDB: jest.fn(),
storeEventCountInfluxDB: jest.fn(),
}));
@@ -50,7 +50,7 @@ describe('udp-event', () => {
setupUdpEventsStorage = udpModule.setupUdpEventsStorage;
globals = (await import('../../globals.js')).default;
influxDBModule = await import('../post-to-influxdb.js');
influxDBModule = await import('../influxdb/index.js');
// Create an instance of UdpEvents for testing
udpEventsInstance = new UdpEvents(globals.logger);

View File

@@ -310,11 +310,6 @@ export const destinationsSchema = {
type: 'object',
properties: {
enable: { type: 'boolean' },
useRefactoredCode: {
type: 'boolean',
description:
'Whether to use refactored InfluxDB code. Only applies to v2 (v1 and v3 always use refactored code)',
},
host: {
type: 'string',
format: 'hostname',

View File

@@ -1,4 +1,4 @@
# InfluxDB Module Refactoring
# InfluxDB Module - Refactored Architecture
This directory contains the refactored InfluxDB integration code, organized by version for better maintainability and testability.
@@ -7,46 +7,64 @@ This directory contains the refactored InfluxDB integration code, organized by v
```text
influxdb/
├── shared/ # Shared utilities and helpers
│ └── utils.js # Common functions used across all versions
├── v1/ # InfluxDB 1.x implementations
├── v2/ # InfluxDB 2.x implementations
├── v3/ # InfluxDB 3.x implementations
│ └── health-metrics.js # Health metrics for v3
│ └── utils.js # Common functions (getFormattedTime, processAppDocuments, writeToInfluxWithRetry, etc.)
├── v1/ # InfluxDB 1.x implementations (InfluxQL)
├── v2/ # InfluxDB 2.x implementations (Flux)
├── v3/ # InfluxDB 3.x implementations (SQL)
├── factory.js # Version router that delegates to appropriate implementation
└── index.js # Main facade providing backward compatibility
└── index.js # Main facade providing consistent API
```
## Feature Flag
## Refactoring Complete
The refactored code is controlled by the `Butler-SOS.influxdbConfig.useRefactoredCode` configuration flag:
All InfluxDB versions (v1, v2, v3) now use the refactored modular code.
```yaml
Butler-SOS:
influxdbConfig:
enable: true
useRefactoredCode: false # Set to true to use refactored code
version: 3
# ... other config
```
**Benefits:**
**Default:** `false` (uses original code for backward compatibility)
- Modular, version-specific implementations
- Shared utilities reduce code duplication
- Unified retry logic with exponential backoff
- Comprehensive JSDoc documentation
- Better error handling and resource management
- Consistent patterns across all versions
## Migration Status
## Implementation Status
### Completed
### V1 (InfluxDB 1.x - InfluxQL)
- ✅ Directory structure
- ✅ Shared utilities (`getFormattedTime`, `processAppDocuments`, etc.)
- ✅ V3 health metrics implementation
- ✅ Factory router with feature flag
- ✅ Backward-compatible facade
- ✅ Configuration schema updated
✅ All modules complete:
### In Progress
- Health metrics
- Proxy sessions
- Butler memory usage
- User events
- Log events
- Event counts
- Queue metrics
- 🚧 V3 remaining modules (sessions, log events, user events, queue metrics)
- 🚧 V2 implementations
- 🚧 V1 implementations
### V2 (InfluxDB 2.x - Flux)
✅ All modules complete:
- Health metrics
- Proxy sessions
- Butler memory usage
- User events
- Log events
- Event counts
- Queue metrics
### V3 (InfluxDB 3.x - SQL)
✅ All modules complete:
- Health metrics
- Proxy sessions
- Butler memory usage
- User events
- Log events
- Event counts
- Queue metrics
### Pending

View File

@@ -22,7 +22,6 @@ jest.unstable_mockModule('../../../globals.js', () => ({
// Mock shared utils
jest.unstable_mockModule('../shared/utils.js', () => ({
getInfluxDbVersion: jest.fn(),
useRefactoredInfluxDb: jest.fn(),
getFormattedTime: jest.fn(),
processAppDocuments: jest.fn(),
isInfluxDbEnabled: jest.fn(),

View File

@@ -0,0 +1,149 @@
// Unit tests for the refactored v2 module lib/influxdb/v2/butler-memory.js.
// Mocks are defined as module-level consts so test assertions can reference
// them directly; jest.unstable_mockModule() registrations MUST precede the
// dynamic import() of the module under test.
import { jest, describe, test, expect, beforeEach } from '@jest/globals';
// Chainable Point mock — only the methods butler-memory.js actually calls
const mockPoint = {
    tag: jest.fn().mockReturnThis(),
    floatField: jest.fn().mockReturnThis(),
};
const mockWriteApi = {
    writePoint: jest.fn(),
    close: jest.fn().mockResolvedValue(),
};
const mockGlobals = {
    logger: {
        info: jest.fn(),
        verbose: jest.fn(),
        debug: jest.fn(),
        error: jest.fn(),
        warn: jest.fn(),
        silly: jest.fn(),
    },
    config: { get: jest.fn() },
    influx: { getWriteApi: jest.fn(() => mockWriteApi) },
    appVersion: '1.2.3',
    getErrorMessage: jest.fn((err) => err.message),
};
jest.unstable_mockModule('../../../globals.js', () => ({ default: mockGlobals }));
jest.unstable_mockModule('@influxdata/influxdb-client', () => ({
    Point: jest.fn(() => mockPoint),
}));
// Shared utils mock: gates (isInfluxDbEnabled) and retry wrapper
const mockUtils = {
    isInfluxDbEnabled: jest.fn(),
    writeToInfluxWithRetry: jest.fn(),
};
jest.unstable_mockModule('../shared/utils.js', () => mockUtils);
describe('v2/butler-memory', () => {
    let storeButlerMemoryV2, globals, utils, Point;
    beforeEach(async () => {
        jest.clearAllMocks();
        globals = (await import('../../../globals.js')).default;
        utils = await import('../shared/utils.js');
        const InfluxClient = await import('@influxdata/influxdb-client');
        Point = InfluxClient.Point;
        const butlerMemory = await import('../v2/butler-memory.js');
        storeButlerMemoryV2 = butlerMemory.storeButlerMemoryV2;
        // Re-arm chainable returns cleared by jest.clearAllMocks()
        mockPoint.tag.mockReturnThis();
        mockPoint.floatField.mockReturnThis();
        globals.config.get.mockImplementation((path) => {
            if (path.includes('org')) return 'test-org';
            if (path.includes('bucket')) return 'test-bucket';
            return undefined;
        });
        utils.isInfluxDbEnabled.mockReturnValue(true);
        // Retry wrapper passes straight through to the wrapped write function
        utils.writeToInfluxWithRetry.mockImplementation(async (fn) => await fn());
        mockWriteApi.writePoint.mockResolvedValue(undefined);
    });
    test('should return early when InfluxDB disabled', async () => {
        utils.isInfluxDbEnabled.mockReturnValue(false);
        const memory = {
            instanceTag: 'test-instance',
            heapUsedMByte: 100,
            heapTotalMByte: 200,
            externalMemoryMByte: 50,
            processMemoryMByte: 250,
        };
        await storeButlerMemoryV2(memory);
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
    });
    test('should return early with invalid memory data', async () => {
        await storeButlerMemoryV2(null);
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
        expect(globals.logger.warn).toHaveBeenCalledWith(
            'MEMORY USAGE V2: Invalid memory data provided'
        );
    });
    test('should return early with non-object memory data', async () => {
        await storeButlerMemoryV2('not an object');
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
        expect(globals.logger.warn).toHaveBeenCalled();
    });
    test('should write complete memory metrics', async () => {
        const memory = {
            instanceTag: 'prod-instance',
            heapUsedMByte: 150.5,
            heapTotalMByte: 300.2,
            externalMemoryMByte: 75.8,
            processMemoryMByte: 400.1,
        };
        await storeButlerMemoryV2(memory);
        expect(Point).toHaveBeenCalledWith('butlersos_memory_usage');
        expect(mockPoint.tag).toHaveBeenCalledWith('butler_sos_instance', 'prod-instance');
        expect(mockPoint.tag).toHaveBeenCalledWith('version', '1.2.3');
        expect(mockPoint.floatField).toHaveBeenCalledWith('heap_used', 150.5);
        expect(mockPoint.floatField).toHaveBeenCalledWith('heap_total', 300.2);
        expect(mockPoint.floatField).toHaveBeenCalledWith('external', 75.8);
        expect(mockPoint.floatField).toHaveBeenCalledWith('process_memory', 400.1);
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
        expect(mockWriteApi.writePoint).toHaveBeenCalled();
        // writeApi must be closed to release resources after the write
        expect(mockWriteApi.close).toHaveBeenCalled();
        expect(globals.logger.verbose).toHaveBeenCalledWith(
            'MEMORY USAGE V2: Sent Butler SOS memory usage data to InfluxDB'
        );
    });
    test('should handle zero memory values', async () => {
        // Zero is a valid measurement and must not be treated as "missing"
        const memory = {
            instanceTag: 'test-instance',
            heapUsedMByte: 0,
            heapTotalMByte: 0,
            externalMemoryMByte: 0,
            processMemoryMByte: 0,
        };
        await storeButlerMemoryV2(memory);
        expect(mockPoint.floatField).toHaveBeenCalledWith('heap_used', 0);
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    test('should log silly level debug info', async () => {
        const memory = {
            instanceTag: 'test-instance',
            heapUsedMByte: 100,
            heapTotalMByte: 200,
            externalMemoryMByte: 50,
            processMemoryMByte: 250,
        };
        await storeButlerMemoryV2(memory);
        expect(globals.logger.debug).toHaveBeenCalled();
        expect(globals.logger.silly).toHaveBeenCalled();
    });
});

View File

@@ -0,0 +1,219 @@
// Mock setup for lib/influxdb/v2/event-counts.js tests.
// All jest.unstable_mockModule() calls must run before the dynamic import()
// of the module under test (done in the describe's beforeEach, below).
import { jest, describe, test, expect, beforeEach } from '@jest/globals';
// Chainable Point mock — only the methods event-counts.js calls
const mockPoint = {
    tag: jest.fn().mockReturnThis(),
    intField: jest.fn().mockReturnThis(),
    stringField: jest.fn().mockReturnThis(),
};
const mockWriteApi = {
    writePoint: jest.fn(),
    writePoints: jest.fn(),
    close: jest.fn().mockResolvedValue(),
};
const mockGlobals = {
    logger: {
        info: jest.fn(),
        verbose: jest.fn(),
        debug: jest.fn(),
        error: jest.fn(),
        warn: jest.fn(),
        silly: jest.fn(),
    },
    config: { get: jest.fn(), has: jest.fn() },
    influx: { getWriteApi: jest.fn(() => mockWriteApi) },
    hostInfo: { hostname: 'test-host' },
    // Aggregate counters keyed by event class; per-test values are reset in beforeEach
    eventCounters: {
        userEvent: { valid: 100, invalid: 5, rejected: 10 },
        logEvent: { valid: 200, invalid: 8, rejected: 15 },
    },
    rejectedEventTags: {
        userEvent: { tag1: 5, tag2: 3 },
        logEvent: { tag3: 7, tag4: 2 },
    },
    // Async accessors for queued UDP events; mocked per test
    udpEvents: {
        getLogEvents: jest.fn(),
        getUserEvents: jest.fn(),
    },
    rejectedEvents: {
        getRejectedLogEvents: jest.fn(),
    },
    getErrorMessage: jest.fn((err) => err.message),
};
jest.unstable_mockModule('../../../globals.js', () => ({ default: mockGlobals }));
jest.unstable_mockModule('@influxdata/influxdb-client', () => ({
    Point: jest.fn(() => mockPoint),
}));
// Shared utils mock: enable-gate and retry wrapper
const mockUtils = {
    isInfluxDbEnabled: jest.fn(),
    writeToInfluxWithRetry: jest.fn(),
};
jest.unstable_mockModule('../shared/utils.js', () => mockUtils);
// v2-specific helper that applies configured static tags to a Point
const mockV2Utils = {
    applyInfluxTags: jest.fn(),
};
jest.unstable_mockModule('../v2/utils.js', () => mockV2Utils);
describe('v2/event-counts', () => {
let storeEventCountV2, storeRejectedEventCountV2, globals, utils, Point;
beforeEach(async () => {
jest.clearAllMocks();
globals = (await import('../../../globals.js')).default;
utils = await import('../shared/utils.js');
const InfluxClient = await import('@influxdata/influxdb-client');
Point = InfluxClient.Point;
const eventCounts = await import('../v2/event-counts.js');
storeEventCountV2 = eventCounts.storeEventCountV2;
storeRejectedEventCountV2 = eventCounts.storeRejectedEventCountV2;
mockPoint.tag.mockReturnThis();
mockPoint.intField.mockReturnThis();
mockPoint.stringField.mockReturnThis();
globals.config.get.mockImplementation((path) => {
if (path.includes('org')) return 'test-org';
if (path.includes('bucket')) return 'test-bucket';
if (path.includes('measurementName')) return 'event_count';
if (path.includes('eventCount.influxdb.tags')) return [{ name: 'env', value: 'prod' }];
if (path.includes('performanceMonitor.influxdb.tags'))
return [{ name: 'monitor', value: 'perf' }];
if (path.includes('enable')) return true;
return undefined;
});
globals.config.has.mockReturnValue(true);
utils.isInfluxDbEnabled.mockReturnValue(true);
utils.writeToInfluxWithRetry.mockImplementation(async (fn) => await fn());
globals.eventCounters = {
userEvent: { valid: 100, invalid: 5, rejected: 10 },
logEvent: { valid: 200, invalid: 8, rejected: 15 },
};
// Mock udpEvents and rejectedEvents methods
globals.udpEvents.getLogEvents.mockResolvedValue([
{ source: 'qseow-engine', host: 'test-host', subsystem: 'engine', counter: 200 },
]);
globals.udpEvents.getUserEvents.mockResolvedValue([
{ source: 'qseow-proxy', host: 'test-host', subsystem: 'proxy', counter: 100 },
]);
globals.rejectedEvents.getRejectedLogEvents.mockResolvedValue([]);
});
describe('storeEventCountV2', () => {
    // When the InfluxDB integration is switched off, the function must be a no-op.
    test('should return early when InfluxDB disabled', async () => {
        utils.isInfluxDbEnabled.mockReturnValue(false);

        await storeEventCountV2();

        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
    });

    // Happy path: one point per event category, tagged and flushed via the retry helper.
    test('should write user and log event counts', async () => {
        await storeEventCountV2();

        // One Point per event category: user + log.
        expect(Point).toHaveBeenCalledTimes(2);

        // Tags contributed by the mocked user event (qseow-proxy) and log event (qseow-engine).
        const expectedTags = [
            ['event_type', 'user'],
            ['event_type', 'log'],
            ['host', 'test-host'],
            ['source', 'qseow-engine'],
            ['source', 'qseow-proxy'],
            ['subsystem', 'engine'],
            ['subsystem', 'proxy'],
        ];
        for (const [tagName, tagValue] of expectedTags) {
            expect(mockPoint.tag).toHaveBeenCalledWith(tagName, tagValue);
        }

        // Counter fields carry the per-event counts from the mocked UDP event store.
        for (const count of [200, 100]) {
            expect(mockPoint.intField).toHaveBeenCalledWith('counter', count);
        }

        // Static config tags are applied once per point, then everything is written and closed.
        expect(mockV2Utils.applyInfluxTags).toHaveBeenCalledTimes(2);
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
        expect(mockWriteApi.writePoints).toHaveBeenCalled();
        expect(mockWriteApi.close).toHaveBeenCalled();
    });

    // With no queued events at all, nothing should be built or written.
    test('should handle zero counts', async () => {
        globals.udpEvents.getLogEvents.mockResolvedValue([]);
        globals.udpEvents.getUserEvents.mockResolvedValue([]);

        await storeEventCountV2();

        expect(Point).not.toHaveBeenCalled();
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
    });

    // A successful write is reported at verbose log level.
    test('should log verbose information', async () => {
        await storeEventCountV2();

        expect(globals.logger.verbose).toHaveBeenCalledWith(
            'EVENT COUNT V2: Sent event count data to InfluxDB'
        );
    });
});
// Tests for the rejected-event counter writer: it should only write when both
// the InfluxDB integration and the rejected-event feature flag are enabled.
describe('storeRejectedEventCountV2', () => {
    // No-op when the InfluxDB integration is disabled.
    test('should return early when InfluxDB disabled', async () => {
        utils.isInfluxDbEnabled.mockReturnValue(false);
        await storeRejectedEventCountV2();
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
    });
    // No-op when the feature flag in config is off, even if InfluxDB is enabled.
    test('should return early when feature disabled', async () => {
        globals.config.get.mockImplementation((path) => {
            if (path.includes('performanceMonitor') && path.includes('enable')) return false;
            if (path.includes('enable')) return true;
            return undefined;
        });
        await storeRejectedEventCountV2();
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
    });
    // Happy path: each rejected-event source becomes a tagged point with its counter.
    test('should write rejected event counts by tag', async () => {
        globals.rejectedEvents.getRejectedLogEvents.mockResolvedValue([
            { source: 'qseow-engine', counter: 5 },
            { source: 'qseow-proxy', counter: 3 },
        ]);
        await storeRejectedEventCountV2();
        expect(Point).toHaveBeenCalled();
        expect(mockPoint.tag).toHaveBeenCalledWith('source', 'qseow-engine');
        expect(mockPoint.tag).toHaveBeenCalledWith('source', 'qseow-proxy');
        expect(mockPoint.intField).toHaveBeenCalledWith('counter', 5);
        expect(mockPoint.intField).toHaveBeenCalledWith('counter', 3);
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // An empty rejected-event list should cause an early return with no writes.
    test('should handle empty rejection tags', async () => {
        globals.rejectedEvents.getRejectedLogEvents.mockResolvedValue([]);
        await storeRejectedEventCountV2();
        expect(Point).not.toHaveBeenCalled();
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
    });
    // NOTE(review): despite the title, this test mocks an empty array (not
    // undefined), so it currently duplicates the previous test. Consider
    // mockResolvedValue(undefined) here — but first confirm the implementation
    // tolerates an undefined result from getRejectedLogEvents.
    test('should handle undefined rejection tags', async () => {
        globals.rejectedEvents.getRejectedLogEvents.mockResolvedValue([]);
        await storeRejectedEventCountV2();
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
    });
    // A successful write is reported at verbose log level.
    test('should log verbose information', async () => {
        globals.rejectedEvents.getRejectedLogEvents.mockResolvedValue([
            { source: 'qseow-engine', counter: 5 },
        ]);
        await storeRejectedEventCountV2();
        expect(globals.logger.verbose).toHaveBeenCalledWith(
            'REJECTED EVENT COUNT V2: Sent rejected event count data to InfluxDB'
        );
    });
});
});

View File

@@ -0,0 +1,226 @@
import { jest, describe, test, expect, beforeEach } from '@jest/globals';
// Chainable mock for the InfluxDB v2 Point class: every setter returns the
// mock itself so production code can fluently chain .tag(...).floatField(...).
const mockPoint = {
    tag: jest.fn().mockReturnThis(),
    stringField: jest.fn().mockReturnThis(),
    intField: jest.fn().mockReturnThis(),
    uintField: jest.fn().mockReturnThis(),
    floatField: jest.fn().mockReturnThis(),
    booleanField: jest.fn().mockReturnThis(),
};
// Mock of the write API handle returned by globals.influx.getWriteApi().
const mockWriteApi = {
    writePoints: jest.fn(),
    close: jest.fn().mockResolvedValue(),
};
// Minimal stand-in for the application-wide globals object: silent logger,
// configurable config lookups, and an influx client handing out mockWriteApi.
const mockGlobals = {
    logger: {
        info: jest.fn(),
        verbose: jest.fn(),
        debug: jest.fn(),
        error: jest.fn(),
        warn: jest.fn(),
        silly: jest.fn(),
    },
    config: { get: jest.fn(), has: jest.fn() },
    influx: { getWriteApi: jest.fn(() => mockWriteApi) },
    hostInfo: { hostname: 'test-host' },
    getErrorMessage: jest.fn((err) => err.message),
};
// NOTE: unstable_mockModule registrations must be in place before the module
// under test is dynamically imported (done in the describe's beforeEach).
jest.unstable_mockModule('../../../globals.js', () => ({ default: mockGlobals }));
jest.unstable_mockModule('@influxdata/influxdb-client', () => ({
    Point: jest.fn(() => mockPoint),
}));
// Mocked shared helpers: enable-flag gate, retry wrapper, app-doc processing,
// and uptime formatting used by the health-metrics writer.
const mockUtils = {
    isInfluxDbEnabled: jest.fn(),
    writeToInfluxWithRetry: jest.fn(),
    processAppDocuments: jest.fn(),
    getFormattedTime: jest.fn(() => '2 days, 3 hours'),
};
jest.unstable_mockModule('../shared/utils.js', () => mockUtils);
// Tests for the v2 health-metrics writer: one InfluxDB point per Sense health
// measurement (server info, memory, apps, cpu, session, users, cache, saturated).
describe('v2/health-metrics', () => {
    let storeHealthMetricsV2, globals, utils, Point;
    beforeEach(async () => {
        jest.clearAllMocks();
        // Dynamic imports must happen after the unstable_mockModule calls above.
        globals = (await import('../../../globals.js')).default;
        utils = await import('../shared/utils.js');
        const InfluxClient = await import('@influxdata/influxdb-client');
        Point = InfluxClient.Point;
        const healthMetrics = await import('../v2/health-metrics.js');
        storeHealthMetricsV2 = healthMetrics.storeHealthMetricsV2;
        // Re-prime chainable returns; clearAllMocks() wipes mockReturnThis().
        mockPoint.tag.mockReturnThis();
        mockPoint.stringField.mockReturnThis();
        mockPoint.intField.mockReturnThis();
        mockPoint.uintField.mockReturnThis();
        mockPoint.floatField.mockReturnThis();
        mockPoint.booleanField.mockReturnThis();
        // Default config: valid org/bucket, all optional fields enabled.
        globals.config.get.mockImplementation((path) => {
            if (path.includes('org')) return 'test-org';
            if (path.includes('bucket')) return 'test-bucket';
            if (path.includes('includeFields')) return true;
            if (path.includes('enableAppNameExtract')) return true;
            return undefined;
        });
        utils.isInfluxDbEnabled.mockReturnValue(true);
        // Retry wrapper just invokes the supplied write callback once.
        utils.writeToInfluxWithRetry.mockImplementation(async (fn) => await fn());
        utils.processAppDocuments.mockResolvedValue({
            appNames: ['App1', 'App2'],
            sessionAppNames: ['Session1', 'Session2'],
        });
    });
    // No-op when the InfluxDB integration is disabled.
    test('should return early when InfluxDB disabled', async () => {
        utils.isInfluxDbEnabled.mockReturnValue(false);
        const body = {
            version: '1.0',
            started: '2024-01-01',
            mem: { committed: 1000, allocated: 800, free: 200 },
            apps: { active_docs: [], loaded_docs: [], in_memory_docs: [], calls: 0, selections: 0 },
            cpu: { total: 50 },
            session: { active: 5, total: 10 },
            users: { active: 3, total: 8 },
            cache: { hits: 100, lookups: 120, added: 20, replaced: 5, bytes_added: 1024 },
            saturated: false,
        };
        await storeHealthMetricsV2('server1', 'host1', body);
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
    });
    // A null health payload is rejected with a warning and no write.
    test('should return early with invalid body', async () => {
        await storeHealthMetricsV2('server1', 'host1', null);
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
        expect(globals.logger.warn).toHaveBeenCalled();
    });
    // Happy path: a full payload produces all eight measurement points.
    test('should write complete health metrics with all fields', async () => {
        const body = {
            version: '1.0.0',
            started: '2024-01-01T00:00:00Z',
            mem: { committed: 1000, allocated: 800, free: 200 },
            apps: {
                active_docs: [{ id: 'app1' }],
                loaded_docs: [{ id: 'app2' }],
                in_memory_docs: [{ id: 'app3' }],
                calls: 10,
                selections: 5,
            },
            cpu: { total: 45.7 },
            session: { active: 5, total: 10 },
            users: { active: 3, total: 8 },
            cache: { hits: 100, lookups: 120, added: 20, replaced: 5, bytes_added: 1024 },
            saturated: false,
        };
        const serverTags = { server_name: 'server1', qs_env: 'dev' };
        await storeHealthMetricsV2('server1', 'host1', body, serverTags);
        expect(Point).toHaveBeenCalledTimes(8); // One for each measurement: sense_server, mem, apps, cpu, session, users, cache, saturated
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
        // processAppDocuments runs once per doc list: active, loaded, in-memory.
        expect(utils.processAppDocuments).toHaveBeenCalledTimes(3);
        expect(mockWriteApi.writePoints).toHaveBeenCalled();
        expect(mockWriteApi.close).toHaveBeenCalled();
    });
    // Server-level tags from config should be attached to every point.
    test('should apply server tags to all points', async () => {
        const body = {
            version: '1.0',
            started: '2024-01-01',
            mem: { committed: 1000, allocated: 800, free: 200 },
            apps: { active_docs: [], loaded_docs: [], in_memory_docs: [], calls: 0, selections: 0 },
            cpu: { total: 50 },
            session: { active: 5, total: 10 },
            users: { active: 3, total: 8 },
            cache: { hits: 100, lookups: 120, added: 20, replaced: 5, bytes_added: 1024 },
            saturated: false,
        };
        const serverTags = { server_name: 'server1', qs_env: 'prod', custom_tag: 'value' };
        await storeHealthMetricsV2('server1', 'host1', body, serverTags);
        // Each of the 8 points should have tags applied (8 points * 3 tags = 24 calls minimum)
        expect(mockPoint.tag).toHaveBeenCalled();
        expect(globals.logger.verbose).toHaveBeenCalled();
    });
    // Empty doc lists are still passed through the app-document helper.
    test('should handle empty app docs', async () => {
        const body = {
            version: '1.0',
            started: '2024-01-01',
            mem: { committed: 1000, allocated: 800, free: 200 },
            apps: { active_docs: [], loaded_docs: [], in_memory_docs: [], calls: 0, selections: 0 },
            cpu: { total: 50 },
            session: { active: 0, total: 0 },
            users: { active: 0, total: 0 },
            cache: { hits: 0, lookups: 0, added: 0, replaced: 0, bytes_added: 0 },
            saturated: false,
        };
        await storeHealthMetricsV2('server1', 'host1', body, {});
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
        expect(utils.processAppDocuments).toHaveBeenCalledWith([], 'HEALTH METRICS', 'active');
    });
    // null/undefined tag values must not break the write.
    test('should handle serverTags with null values', async () => {
        const body = {
            version: '1.0',
            started: '2024-01-01',
            mem: { committed: 1000, allocated: 800, free: 200 },
            apps: { active_docs: [], loaded_docs: [], in_memory_docs: [], calls: 0, selections: 0 },
            cpu: { total: 50 },
            session: { active: 5, total: 10 },
            users: { active: 3, total: 8 },
            cache: { hits: 100, lookups: 120, added: 20, replaced: 5, bytes_added: 1024 },
            saturated: false,
        };
        const serverTags = { server_name: 'server1', null_tag: null, undefined_tag: undefined };
        await storeHealthMetricsV2('server1', 'host1', body, serverTags);
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // With all includeFields.* options off the write should still succeed.
    test('should handle config options for includeFields', async () => {
        globals.config.get.mockImplementation((path) => {
            if (path.includes('org')) return 'test-org';
            if (path.includes('bucket')) return 'test-bucket';
            if (path.includes('includeFields.activeDocs')) return false;
            if (path.includes('includeFields.loadedDocs')) return false;
            if (path.includes('includeFields.inMemoryDocs')) return false;
            if (path.includes('enableAppNameExtract')) return false;
            return undefined;
        });
        const body = {
            version: '1.0',
            started: '2024-01-01',
            mem: { committed: 1000, allocated: 800, free: 200 },
            apps: {
                active_docs: [{ id: 'app1' }],
                loaded_docs: [{ id: 'app2' }],
                in_memory_docs: [{ id: 'app3' }],
                calls: 10,
                selections: 5,
            },
            cpu: { total: 50 },
            session: { active: 5, total: 10 },
            users: { active: 3, total: 8 },
            cache: { hits: 100, lookups: 120, added: 20, replaced: 5, bytes_added: 1024 },
            saturated: false,
        };
        await storeHealthMetricsV2('server1', 'host1', body, {});
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
});

View File

@@ -0,0 +1,376 @@
import { jest, describe, test, expect, beforeEach } from '@jest/globals';
// Chainable mock for the InfluxDB v2 Point class used by the log-event writer.
const mockPoint = {
    tag: jest.fn().mockReturnThis(),
    stringField: jest.fn().mockReturnThis(),
    intField: jest.fn().mockReturnThis(),
    floatField: jest.fn().mockReturnThis(),
};
// Mock of the write API handle; log events are written one point at a time.
const mockWriteApi = {
    writePoint: jest.fn(),
    close: jest.fn().mockResolvedValue(),
};
// Minimal stand-in for the application-wide globals object.
const mockGlobals = {
    logger: {
        info: jest.fn(),
        verbose: jest.fn(),
        debug: jest.fn(),
        error: jest.fn(),
        warn: jest.fn(),
        silly: jest.fn(),
    },
    config: { get: jest.fn(), has: jest.fn() },
    influx: { getWriteApi: jest.fn(() => mockWriteApi) },
    getErrorMessage: jest.fn((err) => err.message),
};
// NOTE: unstable_mockModule registrations must precede the dynamic imports
// performed in the describe's beforeEach.
jest.unstable_mockModule('../../../globals.js', () => ({ default: mockGlobals }));
jest.unstable_mockModule('@influxdata/influxdb-client', () => ({
    Point: jest.fn(() => mockPoint),
}));
// Mocked shared helpers: enable-flag gate and retry wrapper.
const mockUtils = {
    isInfluxDbEnabled: jest.fn(),
    writeToInfluxWithRetry: jest.fn(),
};
jest.unstable_mockModule('../shared/utils.js', () => mockUtils);
// Mocked v2 helper that applies static config tags to a point.
const mockV2Utils = {
    applyInfluxTags: jest.fn(),
};
jest.unstable_mockModule('../v2/utils.js', () => mockV2Utils);
// Tests for the v2 log-event writer: one 'log_event' point per incoming UDP
// log message, with source-specific tags/fields per Sense service.
describe('v2/log-events', () => {
    let storeLogEventV2, globals, utils, Point;
    beforeEach(async () => {
        jest.clearAllMocks();
        // Dynamic imports must happen after the unstable_mockModule calls above.
        globals = (await import('../../../globals.js')).default;
        utils = await import('../shared/utils.js');
        const InfluxClient = await import('@influxdata/influxdb-client');
        Point = InfluxClient.Point;
        const logEvents = await import('../v2/log-events.js');
        storeLogEventV2 = logEvents.storeLogEventV2;
        // Re-prime chainable returns; clearAllMocks() wipes mockReturnThis().
        mockPoint.tag.mockReturnThis();
        mockPoint.stringField.mockReturnThis();
        mockPoint.intField.mockReturnThis();
        mockPoint.floatField.mockReturnThis();
        // Default config: valid org/bucket and one static log-event tag.
        globals.config.get.mockImplementation((path) => {
            if (path.includes('org')) return 'test-org';
            if (path.includes('bucket')) return 'test-bucket';
            if (path.includes('logEvents.tags')) return [{ name: 'env', value: 'prod' }];
            return undefined;
        });
        globals.config.has.mockReturnValue(true);
        utils.isInfluxDbEnabled.mockReturnValue(true);
        utils.writeToInfluxWithRetry.mockImplementation(async (fn) => await fn());
        mockWriteApi.writePoint.mockResolvedValue(undefined);
    });
    // No-op when the InfluxDB integration is disabled.
    test('should return early when InfluxDB disabled', async () => {
        utils.isInfluxDbEnabled.mockReturnValue(false);
        const msg = {
            host: 'host1',
            source: 'qseow-engine',
            level: 'INFO',
            log_row: '1',
            subsystem: 'Core',
            message: 'Test message',
        };
        await storeLogEventV2(msg);
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
    });
    // NOTE(review): the title says "return early" but the assertion expects the
    // write to happen — the implementation does not validate required fields.
    // Consider renaming the test to match its actual expectation.
    test('should return early with missing required fields - no host', async () => {
        const msg = {
            source: 'qseow-engine',
            level: 'INFO',
            log_row: '12345',
            subsystem: 'Core',
            message: 'Test message',
        };
        await storeLogEventV2(msg);
        // Implementation doesn't explicitly validate required fields, it just processes what's there
        // So this test will actually call writeToInfluxWithRetry
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // Messages from unknown sources are logged as warnings and not written.
    test('should return early with unsupported source', async () => {
        const msg = {
            host: 'host1',
            source: 'unsupported-source',
            level: 'INFO',
            log_row: '12345',
            subsystem: 'Core',
            message: 'Test message',
        };
        await storeLogEventV2(msg);
        expect(globals.logger.warn).toHaveBeenCalled();
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
    });
    // Engine events: the richest message shape — verifies every expected tag/field.
    test('should write engine log event', async () => {
        const msg = {
            host: 'host1.example.com',
            source: 'qseow-engine',
            level: 'INFO',
            message: 'Engine started successfully',
            log_row: '12345',
            subsystem: 'Core',
            windows_user: 'SYSTEM',
            exception_message: '',
            user_directory: 'DOMAIN',
            user_id: 'admin',
            user_full: 'DOMAIN\\admin',
            result_code: '0',
            origin: 'Engine',
            context: 'Init',
            task_name: 'Reload Task',
            app_name: 'Sales Dashboard',
            task_id: 'task-123',
            app_id: 'app-456',
        };
        await storeLogEventV2(msg);
        expect(Point).toHaveBeenCalledWith('log_event');
        expect(mockPoint.tag).toHaveBeenCalledWith('host', 'host1.example.com');
        expect(mockPoint.tag).toHaveBeenCalledWith('source', 'qseow-engine');
        expect(mockPoint.tag).toHaveBeenCalledWith('level', 'INFO');
        expect(mockPoint.tag).toHaveBeenCalledWith('log_row', '12345');
        expect(mockPoint.tag).toHaveBeenCalledWith('subsystem', 'Core');
        expect(mockPoint.tag).toHaveBeenCalledWith('windows_user', 'SYSTEM');
        expect(mockPoint.tag).toHaveBeenCalledWith('user_directory', 'DOMAIN');
        expect(mockPoint.tag).toHaveBeenCalledWith('user_id', 'admin');
        expect(mockPoint.tag).toHaveBeenCalledWith('user_full', 'DOMAIN\\admin');
        expect(mockPoint.tag).toHaveBeenCalledWith('result_code', '0');
        expect(mockPoint.tag).toHaveBeenCalledWith('task_id', 'task-123');
        expect(mockPoint.tag).toHaveBeenCalledWith('task_name', 'Reload Task');
        expect(mockPoint.tag).toHaveBeenCalledWith('app_id', 'app-456');
        expect(mockPoint.tag).toHaveBeenCalledWith('app_name', 'Sales Dashboard');
        expect(mockPoint.stringField).toHaveBeenCalledWith(
            'message',
            'Engine started successfully'
        );
        expect(mockPoint.stringField).toHaveBeenCalledWith('exception_message', '');
        // Fields absent from engine messages default to empty strings.
        expect(mockPoint.stringField).toHaveBeenCalledWith('command', '');
        expect(mockPoint.stringField).toHaveBeenCalledWith('result_code_field', '0');
        expect(mockPoint.stringField).toHaveBeenCalledWith('origin', 'Engine');
        expect(mockPoint.stringField).toHaveBeenCalledWith('context', 'Init');
        expect(mockPoint.stringField).toHaveBeenCalledWith('session_id', '');
        expect(mockPoint.stringField).toHaveBeenCalledWith('raw_event', expect.any(String));
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // Proxy events carry command/result-code details.
    test('should write proxy log event', async () => {
        const msg = {
            host: 'proxy1.example.com',
            source: 'qseow-proxy',
            level: 'WARN',
            message: 'Authentication warning',
            log_row: '5000',
            subsystem: 'Proxy',
            command: 'Login',
            user_directory: 'EXTERNAL',
            user_id: 'external_user',
            user_full: 'EXTERNAL\\external_user',
            result_code: '403',
            origin: 'Proxy',
        };
        await storeLogEventV2(msg);
        expect(mockPoint.tag).toHaveBeenCalledWith('source', 'qseow-proxy');
        expect(mockPoint.tag).toHaveBeenCalledWith('level', 'WARN');
        expect(mockPoint.tag).toHaveBeenCalledWith('user_full', 'EXTERNAL\\external_user');
        expect(mockPoint.tag).toHaveBeenCalledWith('result_code', '403');
        expect(mockPoint.stringField).toHaveBeenCalledWith('command', 'Login');
        expect(mockPoint.stringField).toHaveBeenCalledWith('result_code_field', '403');
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // Repository events carry exception details.
    test('should write repository log event', async () => {
        const msg = {
            host: 'repo1.example.com',
            source: 'qseow-repository',
            level: 'ERROR',
            message: 'Database connection error',
            log_row: '7890',
            subsystem: 'Repository',
            exception_message: 'Connection timeout',
        };
        await storeLogEventV2(msg);
        expect(mockPoint.tag).toHaveBeenCalledWith('source', 'qseow-repository');
        expect(mockPoint.tag).toHaveBeenCalledWith('level', 'ERROR');
        expect(mockPoint.stringField).toHaveBeenCalledWith(
            'exception_message',
            'Connection timeout'
        );
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // Scheduler events carry task identity tags.
    test('should write scheduler log event', async () => {
        const msg = {
            host: 'scheduler1.example.com',
            source: 'qseow-scheduler',
            level: 'INFO',
            message: 'Task scheduled',
            log_row: '3333',
            subsystem: 'Scheduler',
            task_name: 'Daily Reload',
            task_id: 'sched-task-001',
        };
        await storeLogEventV2(msg);
        expect(mockPoint.tag).toHaveBeenCalledWith('source', 'qseow-scheduler');
        expect(mockPoint.tag).toHaveBeenCalledWith('level', 'INFO');
        expect(mockPoint.tag).toHaveBeenCalledWith('task_id', 'sched-task-001');
        expect(mockPoint.tag).toHaveBeenCalledWith('task_name', 'Daily Reload');
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // Only the mandatory fields — the write should still go through.
    test('should handle log event with minimal fields', async () => {
        const msg = {
            host: 'host1',
            source: 'qseow-engine',
            level: 'DEBUG',
            log_row: '1',
            subsystem: 'Core',
            message: 'Debug message',
        };
        await storeLogEventV2(msg);
        expect(mockPoint.tag).toHaveBeenCalledWith('host', 'host1');
        expect(mockPoint.tag).toHaveBeenCalledWith('source', 'qseow-engine');
        expect(mockPoint.tag).toHaveBeenCalledWith('level', 'DEBUG');
        expect(mockPoint.stringField).toHaveBeenCalledWith('message', 'Debug message');
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // Empty strings in optional fields must not abort the write.
    test('should handle empty string fields', async () => {
        const msg = {
            host: 'host1',
            source: 'qseow-engine',
            level: 'INFO',
            log_row: '1',
            subsystem: 'Core',
            message: '',
            exception_message: '',
            task_name: '',
            app_name: '',
        };
        await storeLogEventV2(msg);
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // Static tags from config are passed to the v2 tag helper.
    test('should apply config tags', async () => {
        const msg = {
            host: 'host1',
            source: 'qseow-engine',
            level: 'INFO',
            log_row: '1',
            subsystem: 'Core',
            message: 'Test',
        };
        await storeLogEventV2(msg);
        expect(mockV2Utils.applyInfluxTags).toHaveBeenCalledWith(mockPoint, [
            { name: 'env', value: 'prod' },
        ]);
    });
    // Every log level should be passed through verbatim as a tag.
    test('should handle all log levels', async () => {
        const logLevels = ['DEBUG', 'INFO', 'WARN', 'ERROR', 'FATAL'];
        for (const level of logLevels) {
            jest.clearAllMocks();
            const msg = {
                host: 'host1',
                source: 'qseow-engine',
                level: level,
                log_row: '1',
                subsystem: 'Core',
                message: `${level} message`,
            };
            await storeLogEventV2(msg);
            expect(mockPoint.tag).toHaveBeenCalledWith('level', level);
        }
    });
    // Every supported Sense service source should be accepted.
    test('should handle all source types', async () => {
        const sources = [
            'qseow-engine',
            'qseow-proxy',
            'qseow-repository',
            'qseow-scheduler',
            'qseow-qix-perf',
        ];
        for (const source of sources) {
            jest.clearAllMocks();
            const msg = {
                host: 'host1',
                source,
                level: 'INFO',
                log_row: '1',
                subsystem: 'Core',
                message: 'Test',
            };
            // qix-perf requires additional fields
            if (source === 'qseow-qix-perf') {
                msg.method = 'GetLayout';
                msg.object_type = 'sheet';
                msg.proxy_session_id = 'session123';
                msg.session_id = 'session123';
                msg.event_activity_source = 'user';
                msg.process_time = '100';
                msg.work_time = '50';
                msg.lock_time = '10';
                msg.validate_time = '5';
                msg.traverse_time = '35';
                msg.net_ram = '1024';
                msg.peak_ram = '2048';
            }
            await storeLogEventV2(msg);
            expect(mockPoint.tag).toHaveBeenCalledWith('source', source);
        }
    });
    // The writer should emit debug/silly traces plus a verbose success message.
    test('should log debug information', async () => {
        const msg = {
            host: 'host1',
            source: 'qseow-engine',
            level: 'INFO',
            log_row: '1',
            subsystem: 'Core',
            message: 'Test',
        };
        await storeLogEventV2(msg);
        expect(globals.logger.debug).toHaveBeenCalled();
        expect(globals.logger.silly).toHaveBeenCalled();
        expect(globals.logger.verbose).toHaveBeenCalledWith(
            'LOG EVENT V2: Sent log event data to InfluxDB'
        );
    });
});

View File

@@ -0,0 +1,278 @@
import { jest, describe, test, expect, beforeEach } from '@jest/globals';
// Chainable mock for the InfluxDB v2 Point class used by the queue-metrics writer.
const mockPoint = {
    tag: jest.fn().mockReturnThis(),
    intField: jest.fn().mockReturnThis(),
    floatField: jest.fn().mockReturnThis(),
};
// Mock of the write API handle; queue metrics are written one point at a time.
const mockWriteApi = {
    writePoint: jest.fn(),
    close: jest.fn().mockResolvedValue(),
};
// Minimal stand-in for the application-wide globals object. The UDP queue
// manager slots start as null so tests can exercise the "not initialized" path.
const mockGlobals = {
    logger: {
        info: jest.fn(),
        verbose: jest.fn(),
        debug: jest.fn(),
        error: jest.fn(),
        warn: jest.fn(),
        silly: jest.fn(),
    },
    config: { get: jest.fn(), has: jest.fn() },
    influx: { getWriteApi: jest.fn(() => mockWriteApi) },
    hostInfo: { hostname: 'test-host' },
    getErrorMessage: jest.fn((err) => err.message),
    udpQueueManagerUserActivity: null,
    udpQueueManagerLogEvents: null,
};
// Shared queue-manager mock assigned to both user-activity and log-event slots.
const mockQueueManager = {
    getMetrics: jest.fn(),
    clearMetrics: jest.fn().mockResolvedValue(),
};
// NOTE: unstable_mockModule registrations must precede the dynamic imports
// performed in the describe's beforeEach.
jest.unstable_mockModule('../../../globals.js', () => ({ default: mockGlobals }));
jest.unstable_mockModule('@influxdata/influxdb-client', () => ({
    Point: jest.fn(() => mockPoint),
}));
// Mocked shared helpers: enable-flag gate and retry wrapper.
const mockUtils = {
    isInfluxDbEnabled: jest.fn(),
    writeToInfluxWithRetry: jest.fn(),
};
jest.unstable_mockModule('../shared/utils.js', () => mockUtils);
// Mocked v2 helper that applies static config tags to a point.
const mockV2Utils = {
    applyInfluxTags: jest.fn(),
};
jest.unstable_mockModule('../v2/utils.js', () => mockV2Utils);
// Tests for the v2 queue-metrics writers: one 'event_queue_metrics' point per
// flush, for the user-event and log-event UDP queues respectively.
describe('v2/queue-metrics', () => {
    let storeUserEventQueueMetricsV2, storeLogEventQueueMetricsV2, globals, utils, Point;
    beforeEach(async () => {
        jest.clearAllMocks();
        // Dynamic imports must happen after the unstable_mockModule calls above.
        globals = (await import('../../../globals.js')).default;
        utils = await import('../shared/utils.js');
        const InfluxClient = await import('@influxdata/influxdb-client');
        Point = InfluxClient.Point;
        const queueMetrics = await import('../v2/queue-metrics.js');
        storeUserEventQueueMetricsV2 = queueMetrics.storeUserEventQueueMetricsV2;
        storeLogEventQueueMetricsV2 = queueMetrics.storeLogEventQueueMetricsV2;
        // Re-prime chainable returns; clearAllMocks() wipes mockReturnThis().
        mockPoint.tag.mockReturnThis();
        mockPoint.intField.mockReturnThis();
        mockPoint.floatField.mockReturnThis();
        // Default config: valid org/bucket, custom measurement name, one static
        // tag, and the feature enabled. Order of the includes() checks matters:
        // more specific paths are matched before the generic 'enable' fallback.
        globals.config.get.mockImplementation((path) => {
            if (path.includes('org')) return 'test-org';
            if (path.includes('bucket')) return 'test-bucket';
            if (path.includes('measurementName')) return 'event_queue_metrics';
            if (path.includes('queueMetrics.influxdb.tags'))
                return [{ name: 'env', value: 'prod' }];
            if (path.includes('enable')) return true;
            return undefined;
        });
        globals.config.has.mockReturnValue(true);
        // Both queue slots share the same manager mock.
        globals.udpQueueManagerUserActivity = mockQueueManager;
        globals.udpQueueManagerLogEvents = mockQueueManager;
        utils.isInfluxDbEnabled.mockReturnValue(true);
        utils.writeToInfluxWithRetry.mockImplementation(async (cb) => await cb());
        mockWriteApi.writePoint.mockResolvedValue(undefined);
        mockWriteApi.close.mockResolvedValue(undefined);
        // Representative non-zero metrics snapshot.
        mockQueueManager.getMetrics.mockReturnValue({
            queueSize: 100,
            queueMaxSize: 1000,
            queueUtilizationPct: 10.0,
            queuePending: 5,
            messagesReceived: 500,
            messagesQueued: 450,
            messagesProcessed: 400,
            messagesFailed: 10,
            messagesDroppedTotal: 40,
            messagesDroppedRateLimit: 20,
            messagesDroppedQueueFull: 15,
            messagesDroppedSize: 5,
            processingTimeAvgMs: 25.5,
            processingTimeP95Ms: 50.2,
            processingTimeMaxMs: 100.8,
            rateLimitCurrent: 100,
            backpressureActive: 0,
        });
    });
    describe('storeUserEventQueueMetricsV2', () => {
        // No-op when the InfluxDB integration is disabled.
        test('should return early when InfluxDB disabled', async () => {
            utils.isInfluxDbEnabled.mockReturnValue(false);
            await storeUserEventQueueMetricsV2();
            expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
        });
        // No-op when the queue-metrics feature flag in config is off.
        test('should return early when feature disabled', async () => {
            globals.config.get.mockImplementation((path) => {
                if (path.includes('enable')) return false;
                return undefined;
            });
            await storeUserEventQueueMetricsV2();
            expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
        });
        // A missing queue manager is reported as a warning, nothing is written.
        test('should return early when queue manager not initialized', async () => {
            globals.udpQueueManagerUserActivity = null;
            await storeUserEventQueueMetricsV2();
            expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
            expect(globals.logger.warn).toHaveBeenCalledWith(
                'USER EVENT QUEUE METRICS V2: Queue manager not initialized'
            );
        });
        // Happy path: every metric from getMetrics() lands in the point, static
        // tags are applied, and the metrics are cleared after a successful write.
        test('should write complete user event queue metrics', async () => {
            await storeUserEventQueueMetricsV2();
            expect(Point).toHaveBeenCalledWith('event_queue_metrics');
            expect(mockPoint.tag).toHaveBeenCalledWith('queue_type', 'user_events');
            expect(mockPoint.tag).toHaveBeenCalledWith('host', 'test-host');
            expect(mockPoint.intField).toHaveBeenCalledWith('queue_size', 100);
            expect(mockPoint.intField).toHaveBeenCalledWith('queue_max_size', 1000);
            expect(mockPoint.floatField).toHaveBeenCalledWith('queue_utilization_pct', 10.0);
            expect(mockPoint.intField).toHaveBeenCalledWith('queue_pending', 5);
            expect(mockPoint.intField).toHaveBeenCalledWith('messages_received', 500);
            expect(mockPoint.intField).toHaveBeenCalledWith('messages_queued', 450);
            expect(mockPoint.intField).toHaveBeenCalledWith('messages_processed', 400);
            expect(mockPoint.intField).toHaveBeenCalledWith('messages_failed', 10);
            expect(mockPoint.intField).toHaveBeenCalledWith('messages_dropped_total', 40);
            expect(mockPoint.intField).toHaveBeenCalledWith('messages_dropped_rate_limit', 20);
            expect(mockPoint.intField).toHaveBeenCalledWith('messages_dropped_queue_full', 15);
            expect(mockPoint.intField).toHaveBeenCalledWith('messages_dropped_size', 5);
            expect(mockPoint.floatField).toHaveBeenCalledWith('processing_time_avg_ms', 25.5);
            expect(mockPoint.floatField).toHaveBeenCalledWith('processing_time_p95_ms', 50.2);
            expect(mockPoint.floatField).toHaveBeenCalledWith('processing_time_max_ms', 100.8);
            expect(mockPoint.intField).toHaveBeenCalledWith('rate_limit_current', 100);
            expect(mockPoint.intField).toHaveBeenCalledWith('backpressure_active', 0);
            expect(mockV2Utils.applyInfluxTags).toHaveBeenCalledWith(mockPoint, [
                { name: 'env', value: 'prod' },
            ]);
            expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
            expect(mockWriteApi.writePoint).toHaveBeenCalledWith(mockPoint);
            expect(mockWriteApi.close).toHaveBeenCalled();
            expect(mockQueueManager.clearMetrics).toHaveBeenCalled();
        });
        // All-zero metrics are still a valid snapshot and should be written.
        test('should handle zero metrics', async () => {
            mockQueueManager.getMetrics.mockReturnValue({
                queueSize: 0,
                queueMaxSize: 1000,
                queueUtilizationPct: 0,
                queuePending: 0,
                messagesReceived: 0,
                messagesQueued: 0,
                messagesProcessed: 0,
                messagesFailed: 0,
                messagesDroppedTotal: 0,
                messagesDroppedRateLimit: 0,
                messagesDroppedQueueFull: 0,
                messagesDroppedSize: 0,
                processingTimeAvgMs: 0,
                processingTimeP95Ms: 0,
                processingTimeMaxMs: 0,
                rateLimitCurrent: 0,
                backpressureActive: 0,
            });
            await storeUserEventQueueMetricsV2();
            expect(mockPoint.intField).toHaveBeenCalledWith('queue_size', 0);
            expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
        });
        // A successful write is reported at verbose log level.
        test('should log verbose information', async () => {
            await storeUserEventQueueMetricsV2();
            expect(globals.logger.verbose).toHaveBeenCalledWith(
                'USER EVENT QUEUE METRICS V2: Sent queue metrics data to InfluxDB'
            );
        });
    });
    describe('storeLogEventQueueMetricsV2', () => {
        // No-op when the InfluxDB integration is disabled.
        test('should return early when InfluxDB disabled', async () => {
            utils.isInfluxDbEnabled.mockReturnValue(false);
            await storeLogEventQueueMetricsV2();
            expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
        });
        // No-op when the queue-metrics feature flag in config is off.
        test('should return early when feature disabled', async () => {
            globals.config.get.mockImplementation((path) => {
                if (path.includes('enable')) return false;
                return undefined;
            });
            await storeLogEventQueueMetricsV2();
            expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
        });
        // A missing queue manager is reported as a warning, nothing is written.
        test('should return early when queue manager not initialized', async () => {
            globals.udpQueueManagerLogEvents = null;
            await storeLogEventQueueMetricsV2();
            expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
            expect(globals.logger.warn).toHaveBeenCalledWith(
                'LOG EVENT QUEUE METRICS V2: Queue manager not initialized'
            );
        });
        // Happy path: same measurement but tagged with queue_type=log_events.
        test('should write complete log event queue metrics', async () => {
            await storeLogEventQueueMetricsV2();
            expect(Point).toHaveBeenCalledWith('event_queue_metrics');
            expect(mockPoint.tag).toHaveBeenCalledWith('queue_type', 'log_events');
            expect(mockPoint.tag).toHaveBeenCalledWith('host', 'test-host');
            expect(mockPoint.intField).toHaveBeenCalledWith('queue_size', 100);
            expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
            expect(mockQueueManager.clearMetrics).toHaveBeenCalled();
        });
        // A nearly-full queue with backpressure active is reported faithfully.
        test('should handle high utilization', async () => {
            mockQueueManager.getMetrics.mockReturnValue({
                queueSize: 950,
                queueMaxSize: 1000,
                queueUtilizationPct: 95.0,
                queuePending: 50,
                messagesReceived: 10000,
                messagesQueued: 9500,
                messagesProcessed: 9000,
                messagesFailed: 100,
                messagesDroppedTotal: 400,
                messagesDroppedRateLimit: 200,
                messagesDroppedQueueFull: 150,
                messagesDroppedSize: 50,
                processingTimeAvgMs: 125.5,
                processingTimeP95Ms: 250.2,
                processingTimeMaxMs: 500.8,
                rateLimitCurrent: 50,
                backpressureActive: 1,
            });
            await storeLogEventQueueMetricsV2();
            expect(mockPoint.floatField).toHaveBeenCalledWith('queue_utilization_pct', 95.0);
            expect(mockPoint.intField).toHaveBeenCalledWith('backpressure_active', 1);
            expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
        });
        // A successful write is reported at verbose log level.
        test('should log verbose information', async () => {
            await storeLogEventQueueMetricsV2();
            expect(globals.logger.verbose).toHaveBeenCalledWith(
                'LOG EVENT QUEUE METRICS V2: Sent queue metrics data to InfluxDB'
            );
        });
    });
});

View File

@@ -0,0 +1,177 @@
import { jest, describe, test, expect, beforeEach } from '@jest/globals';
// Chainable mock for InfluxDB points; sessions tests pass pre-built points in.
const mockPoint = {
    tag: jest.fn().mockReturnThis(),
    stringField: jest.fn().mockReturnThis(),
};
// Mock of the per-server write API; session points are written in a batch.
const mockWriteApi = {
    writePoints: jest.fn(),
    close: jest.fn().mockResolvedValue(),
};
// Minimal stand-in for the application-wide globals object. influxWriteApi
// models the per-server write-API lookup table keyed by serverName.
const mockGlobals = {
    logger: {
        info: jest.fn(),
        verbose: jest.fn(),
        debug: jest.fn(),
        error: jest.fn(),
        warn: jest.fn(),
        silly: jest.fn(),
    },
    config: { get: jest.fn() },
    influx: { getWriteApi: jest.fn(() => mockWriteApi) },
    influxWriteApi: [{ serverName: 'server1' }],
    getErrorMessage: jest.fn((err) => err.message),
};
// NOTE: unstable_mockModule registrations must precede the dynamic imports
// performed in the describe's beforeEach.
jest.unstable_mockModule('../../../globals.js', () => ({ default: mockGlobals }));
// Mocked shared helpers: enable-flag gate and retry wrapper.
const mockUtils = {
    isInfluxDbEnabled: jest.fn(),
    writeToInfluxWithRetry: jest.fn(),
};
jest.unstable_mockModule('../shared/utils.js', () => mockUtils);
describe('v2/sessions', () => {
let storeSessionsV2, globals, utils;
beforeEach(async () => {
jest.clearAllMocks();
globals = (await import('../../../globals.js')).default;
utils = await import('../shared/utils.js');
const sessions = await import('../v2/sessions.js');
storeSessionsV2 = sessions.storeSessionsV2;
// Set up influxWriteApi array with matching server
globals.influxWriteApi = [{ serverName: 'server1' }];
globals.config.get.mockImplementation((path) => {
if (path.includes('org')) return 'test-org';
if (path.includes('bucket')) return 'test-bucket';
return undefined;
});
utils.isInfluxDbEnabled.mockReturnValue(true);
utils.writeToInfluxWithRetry.mockImplementation(async (cb) => await cb());
mockWriteApi.writePoints.mockResolvedValue(undefined);
mockWriteApi.close.mockResolvedValue(undefined);
});
test('should return early when InfluxDB disabled', async () => {
utils.isInfluxDbEnabled.mockReturnValue(false);
const userSessions = {
serverName: 'server1',
host: 'host1',
virtualProxy: 'vp1',
sessionCount: 5,
uniqueUserList: 'user1,user2',
datapointInfluxdb: [mockPoint],
};
await storeSessionsV2(userSessions);
expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
});
test('should return early with invalid datapointInfluxdb (not array)', async () => {
const userSessions = {
serverName: 'server1',
host: 'host1',
virtualProxy: 'vp1',
sessionCount: 5,
uniqueUserList: 'user1,user2',
datapointInfluxdb: 'not-an-array',
};
await storeSessionsV2(userSessions);
expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
expect(globals.logger.warn).toHaveBeenCalledWith(
expect.stringContaining('Invalid data format')
);
});
test('should return early when writeApi not found', async () => {
globals.influxWriteApi = [{ serverName: 'different-server' }];
const userSessions = {
serverName: 'server1',
host: 'host1',
virtualProxy: 'vp1',
sessionCount: 5,
uniqueUserList: 'user1,user2',
datapointInfluxdb: [mockPoint],
};
await storeSessionsV2(userSessions);
expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
expect(globals.logger.warn).toHaveBeenCalledWith(
expect.stringContaining('Influxdb write API object not found')
);
});
test('should write session data successfully', async () => {
const userSessions = {
serverName: 'server1',
host: 'host1.example.com',
virtualProxy: '/virtual-proxy',
sessionCount: 10,
uniqueUserList: 'user1,user2,user3',
datapointInfluxdb: [mockPoint, mockPoint, mockPoint],
};
await storeSessionsV2(userSessions);
expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
expect(mockWriteApi.writePoints).toHaveBeenCalledWith(userSessions.datapointInfluxdb);
expect(mockWriteApi.close).toHaveBeenCalled();
expect(globals.logger.verbose).toHaveBeenCalledWith(
expect.stringContaining('Sent user session data to InfluxDB')
);
});
test('should write empty session array', async () => {
const userSessions = {
serverName: 'server1',
host: 'host1',
virtualProxy: 'vp1',
sessionCount: 0,
uniqueUserList: '',
datapointInfluxdb: [],
};
await storeSessionsV2(userSessions);
expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
expect(mockWriteApi.writePoints).toHaveBeenCalledWith([]);
});
test('should log silly debug information', async () => {
const userSessions = {
serverName: 'server1',
host: 'host1',
virtualProxy: 'vp1',
sessionCount: 5,
uniqueUserList: 'user1,user2',
datapointInfluxdb: [mockPoint],
};
await storeSessionsV2(userSessions);
expect(globals.logger.debug).toHaveBeenCalled();
expect(globals.logger.silly).toHaveBeenCalled();
});
test('should handle multiple datapoints', async () => {
const datapoints = Array(20).fill(mockPoint);
const userSessions = {
serverName: 'server1',
host: 'host1',
virtualProxy: 'vp1',
sessionCount: 20,
uniqueUserList: 'user1,user2,user3,user4,user5',
datapointInfluxdb: datapoints,
};
await storeSessionsV2(userSessions);
expect(mockWriteApi.writePoints).toHaveBeenCalledWith(datapoints);
expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
});
});

View File

@@ -0,0 +1,229 @@
import { jest, describe, test, expect, beforeEach } from '@jest/globals';
// Point double: user-events code only calls .tag() and .stringField(), both chainable.
const mockPoint = {
    tag: jest.fn().mockReturnThis(),
    stringField: jest.fn().mockReturnThis(),
};
// Write API double returned by globals.influx.getWriteApi().
const mockWriteApi = {
    writePoint: jest.fn(),
    close: jest.fn().mockResolvedValue(),
};
// Controllable stand-in for the app-wide globals module (logger, config, influx client).
const mockGlobals = {
    logger: {
        info: jest.fn(),
        verbose: jest.fn(),
        debug: jest.fn(),
        error: jest.fn(),
        warn: jest.fn(),
        silly: jest.fn(),
    },
    config: { get: jest.fn(), has: jest.fn() },
    influx: { getWriteApi: jest.fn(() => mockWriteApi) },
    getErrorMessage: jest.fn((err) => err.message),
};
jest.unstable_mockModule('../../../globals.js', () => ({ default: mockGlobals }));
// Mock the InfluxDB client library so Point construction can be asserted on.
jest.unstable_mockModule('@influxdata/influxdb-client', () => ({
    Point: jest.fn(() => mockPoint),
}));
// Shared utils stub: enabled flag + retry wrapper.
const mockUtils = {
    isInfluxDbEnabled: jest.fn(),
    writeToInfluxWithRetry: jest.fn(),
};
jest.unstable_mockModule('../shared/utils.js', () => mockUtils);
// v2-specific utils stub: static-tag application is verified via this spy.
const mockV2Utils = {
    applyInfluxTags: jest.fn(),
};
jest.unstable_mockModule('../v2/utils.js', () => mockV2Utils);
// Test suite for storeUserEventV2: writing Qlik Sense user events to InfluxDB v2.
describe('v2/user-events', () => {
    let storeUserEventV2, globals, utils, Point;
    beforeEach(async () => {
        jest.clearAllMocks();
        // Dynamic imports so the unstable_mockModule registrations above take effect
        globals = (await import('../../../globals.js')).default;
        utils = await import('../shared/utils.js');
        const InfluxClient = await import('@influxdata/influxdb-client');
        Point = InfluxClient.Point;
        const userEvents = await import('../v2/user-events.js');
        storeUserEventV2 = userEvents.storeUserEventV2;
        // clearAllMocks wipes mockReturnThis, so chainability must be restored here
        mockPoint.tag.mockReturnThis();
        mockPoint.stringField.mockReturnThis();
        // Minimal config: org/bucket plus one static tag for user events
        globals.config.get.mockImplementation((path) => {
            if (path.includes('org')) return 'test-org';
            if (path.includes('bucket')) return 'test-bucket';
            if (path.includes('userEvents.tags')) return [{ name: 'env', value: 'prod' }];
            return undefined;
        });
        globals.config.has.mockReturnValue(true);
        // Default happy path: InfluxDB enabled, retry wrapper simply invokes its callback
        utils.isInfluxDbEnabled.mockReturnValue(true);
        utils.writeToInfluxWithRetry.mockImplementation(async (fn) => await fn());
        mockWriteApi.writePoint.mockResolvedValue(undefined);
    });
    // Disabled InfluxDB => storeUserEventV2 must be a no-op (no write attempt).
    test('should return early when InfluxDB disabled', async () => {
        utils.isInfluxDbEnabled.mockReturnValue(false);
        const msg = {
            host: 'host1',
            command: 'OpenApp',
            user_directory: 'DOMAIN',
            user_id: 'user1',
            origin: 'QlikSense',
        };
        await storeUserEventV2(msg);
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
    });
    // Messages lacking mandatory fields are rejected with a warning.
    test('should return early with missing required fields', async () => {
        const msg = {
            host: 'host1',
            command: 'OpenApp',
            // missing user_directory, user_id, origin
        };
        await storeUserEventV2(msg);
        expect(utils.writeToInfluxWithRetry).not.toHaveBeenCalled();
        expect(globals.logger.warn).toHaveBeenCalledWith(
            expect.stringContaining('Missing required fields')
        );
    });
    // Full event: verifies every tag/field the module sets, including app info,
    // user-agent details, static config tags, and the final write/close calls.
    test('should write complete user event with all fields', async () => {
        const msg = {
            host: 'host1.example.com',
            command: 'OpenApp',
            user_directory: 'DOMAIN',
            user_id: 'john.doe',
            origin: 'QlikSense',
            appId: 'app-123',
            appName: 'Sales Dashboard',
            ua: {
                browser: { name: 'Chrome', major: '120' },
                os: { name: 'Windows', version: '10' },
            },
        };
        await storeUserEventV2(msg);
        expect(Point).toHaveBeenCalledWith('user_events');
        expect(mockPoint.tag).toHaveBeenCalledWith('host', 'host1.example.com');
        expect(mockPoint.tag).toHaveBeenCalledWith('event_action', 'OpenApp');
        expect(mockPoint.tag).toHaveBeenCalledWith('userFull', 'DOMAIN\\john.doe');
        expect(mockPoint.tag).toHaveBeenCalledWith('userDirectory', 'DOMAIN');
        expect(mockPoint.tag).toHaveBeenCalledWith('userId', 'john.doe');
        expect(mockPoint.tag).toHaveBeenCalledWith('origin', 'QlikSense');
        expect(mockPoint.tag).toHaveBeenCalledWith('appId', 'app-123');
        expect(mockPoint.tag).toHaveBeenCalledWith('appName', 'Sales Dashboard');
        expect(mockPoint.tag).toHaveBeenCalledWith('uaBrowserName', 'Chrome');
        expect(mockPoint.tag).toHaveBeenCalledWith('uaBrowserMajorVersion', '120');
        expect(mockPoint.tag).toHaveBeenCalledWith('uaOsName', 'Windows');
        expect(mockPoint.tag).toHaveBeenCalledWith('uaOsVersion', '10');
        expect(mockPoint.stringField).toHaveBeenCalledWith('userFull', 'DOMAIN\\john.doe');
        expect(mockPoint.stringField).toHaveBeenCalledWith('userId', 'john.doe');
        expect(mockPoint.stringField).toHaveBeenCalledWith('appId_field', 'app-123');
        expect(mockPoint.stringField).toHaveBeenCalledWith('appName_field', 'Sales Dashboard');
        expect(mockV2Utils.applyInfluxTags).toHaveBeenCalledWith(mockPoint, [
            { name: 'env', value: 'prod' },
        ]);
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
        expect(mockWriteApi.writePoint).toHaveBeenCalled();
        expect(mockWriteApi.close).toHaveBeenCalled();
    });
    // App tags are optional: absent appId/appName must not produce tags.
    test('should handle event without app info', async () => {
        const msg = {
            host: 'host1',
            command: 'Login',
            user_directory: 'DOMAIN',
            user_id: 'user1',
            origin: 'QlikSense',
        };
        await storeUserEventV2(msg);
        expect(mockPoint.tag).not.toHaveBeenCalledWith('appId', expect.anything());
        expect(mockPoint.tag).not.toHaveBeenCalledWith('appName', expect.anything());
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // User-agent info is optional: absent ua must not produce browser/os tags.
    test('should handle event without user agent', async () => {
        const msg = {
            host: 'host1',
            command: 'OpenApp',
            user_directory: 'DOMAIN',
            user_id: 'user1',
            origin: 'QlikSense',
        };
        await storeUserEventV2(msg);
        expect(mockPoint.tag).not.toHaveBeenCalledWith('uaBrowserName', expect.anything());
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // Partial ua object: only the present pieces become tags.
    test('should handle partial user agent info', async () => {
        const msg = {
            host: 'host1',
            command: 'OpenApp',
            user_directory: 'DOMAIN',
            user_id: 'user1',
            origin: 'QlikSense',
            ua: {
                browser: { name: 'Firefox' }, // no major version
                // no os info
            },
        };
        await storeUserEventV2(msg);
        expect(mockPoint.tag).toHaveBeenCalledWith('uaBrowserName', 'Firefox');
        expect(mockPoint.tag).not.toHaveBeenCalledWith('uaBrowserMajorVersion', expect.anything());
        expect(utils.writeToInfluxWithRetry).toHaveBeenCalled();
    });
    // Verifies diagnostic logging at debug/silly levels plus the success message.
    test('should log debug information', async () => {
        const msg = {
            host: 'host1',
            command: 'OpenApp',
            user_directory: 'DOMAIN',
            user_id: 'user1',
            origin: 'QlikSense',
        };
        await storeUserEventV2(msg);
        expect(globals.logger.debug).toHaveBeenCalled();
        expect(globals.logger.silly).toHaveBeenCalled();
        expect(globals.logger.verbose).toHaveBeenCalledWith(
            'USER EVENT V2: Sent user event data to InfluxDB'
        );
    });
    // The command string is passed through verbatim as the event_action tag.
    test('should handle different event commands', async () => {
        const commands = ['OpenApp', 'CreateApp', 'DeleteApp', 'ReloadApp'];
        for (const command of commands) {
            jest.clearAllMocks();
            const msg = {
                host: 'host1',
                command,
                user_directory: 'DOMAIN',
                user_id: 'user1',
                origin: 'QlikSense',
            };
            await storeUserEventV2(msg);
            expect(mockPoint.tag).toHaveBeenCalledWith('event_action', command);
        }
    });
});

View File

@@ -0,0 +1,189 @@
import { jest, describe, test, expect, beforeEach } from '@jest/globals';
// Minimal Point double: applyInfluxTags only exercises the chainable .tag() method.
const mockPoint = {
    tag: jest.fn().mockReturnThis(),
};
// Register the InfluxDB client mock BEFORE v2/utils.js is imported, so its own
// `import { Point }` resolves to this double instead of the real library.
jest.unstable_mockModule('@influxdata/influxdb-client', () => ({
    Point: jest.fn(() => mockPoint),
}));
// Test suite for applyInfluxTags(): applying {name, value} tag pairs to a Point.
// Fix vs. previous revision: the `Point` binding captured in beforeEach was never
// used by any test, so the dead variable and its dynamic import were removed.
describe('v2/utils', () => {
    let applyInfluxTags;
    beforeEach(async () => {
        jest.clearAllMocks();
        // Dynamic import so the unstable_mockModule registration above takes effect
        const utils = await import('../v2/utils.js');
        applyInfluxTags = utils.applyInfluxTags;
        // clearAllMocks wipes mockReturnThis, so chainability must be restored here
        mockPoint.tag.mockReturnThis();
    });
    // A single {name, value} pair becomes one .tag() call.
    test('should apply single tag', () => {
        const tags = [{ name: 'env', value: 'prod' }];
        const result = applyInfluxTags(mockPoint, tags);
        expect(mockPoint.tag).toHaveBeenCalledWith('env', 'prod');
        expect(result).toBe(mockPoint);
    });
    // Each element of the array produces its own .tag() call.
    test('should apply multiple tags', () => {
        const tags = [
            { name: 'env', value: 'prod' },
            { name: 'region', value: 'us-east' },
            { name: 'cluster', value: 'main' },
        ];
        const result = applyInfluxTags(mockPoint, tags);
        expect(mockPoint.tag).toHaveBeenCalledTimes(3);
        expect(mockPoint.tag).toHaveBeenCalledWith('env', 'prod');
        expect(mockPoint.tag).toHaveBeenCalledWith('region', 'us-east');
        expect(mockPoint.tag).toHaveBeenCalledWith('cluster', 'main');
        expect(result).toBe(mockPoint);
    });
    // null/undefined/empty tag lists are no-ops that still return the point.
    test('should handle null tags', () => {
        const result = applyInfluxTags(mockPoint, null);
        expect(mockPoint.tag).not.toHaveBeenCalled();
        expect(result).toBe(mockPoint);
    });
    test('should handle undefined tags', () => {
        const result = applyInfluxTags(mockPoint, undefined);
        expect(mockPoint.tag).not.toHaveBeenCalled();
        expect(result).toBe(mockPoint);
    });
    test('should handle empty array', () => {
        const result = applyInfluxTags(mockPoint, []);
        expect(mockPoint.tag).not.toHaveBeenCalled();
        expect(result).toBe(mockPoint);
    });
    // Entries whose value is null are skipped entirely.
    test('should skip tags with null values', () => {
        const tags = [
            { name: 'env', value: 'prod' },
            { name: 'region', value: null },
            { name: 'cluster', value: 'main' },
        ];
        const result = applyInfluxTags(mockPoint, tags);
        expect(mockPoint.tag).toHaveBeenCalledTimes(2);
        expect(mockPoint.tag).toHaveBeenCalledWith('env', 'prod');
        expect(mockPoint.tag).toHaveBeenCalledWith('cluster', 'main');
        expect(mockPoint.tag).not.toHaveBeenCalledWith('region', null);
        expect(result).toBe(mockPoint);
    });
    // Entries whose value is undefined are skipped entirely.
    test('should skip tags with undefined values', () => {
        const tags = [
            { name: 'env', value: 'prod' },
            { name: 'region', value: undefined },
            { name: 'cluster', value: 'main' },
        ];
        const result = applyInfluxTags(mockPoint, tags);
        expect(mockPoint.tag).toHaveBeenCalledTimes(2);
        expect(mockPoint.tag).toHaveBeenCalledWith('env', 'prod');
        expect(mockPoint.tag).toHaveBeenCalledWith('cluster', 'main');
        expect(result).toBe(mockPoint);
    });
    // Entries with no name property are skipped entirely.
    test('should skip tags without name', () => {
        const tags = [
            { name: 'env', value: 'prod' },
            { value: 'no-name' },
            { name: 'cluster', value: 'main' },
        ];
        const result = applyInfluxTags(mockPoint, tags);
        expect(mockPoint.tag).toHaveBeenCalledTimes(2);
        expect(mockPoint.tag).toHaveBeenCalledWith('env', 'prod');
        expect(mockPoint.tag).toHaveBeenCalledWith('cluster', 'main');
        expect(result).toBe(mockPoint);
    });
    // Numbers/booleans are stringified before being passed to .tag().
    test('should convert non-string values to strings', () => {
        const tags = [
            { name: 'count', value: 123 },
            { name: 'enabled', value: true },
            { name: 'ratio', value: 3.14 },
        ];
        const result = applyInfluxTags(mockPoint, tags);
        expect(mockPoint.tag).toHaveBeenCalledWith('count', '123');
        expect(mockPoint.tag).toHaveBeenCalledWith('enabled', 'true');
        expect(mockPoint.tag).toHaveBeenCalledWith('ratio', '3.14');
        expect(result).toBe(mockPoint);
    });
    // Falsy-but-valid values ('', 0, false) must still be applied, not skipped.
    test('should handle empty string values', () => {
        const tags = [
            { name: 'env', value: '' },
            { name: 'region', value: 'us-east' },
        ];
        const result = applyInfluxTags(mockPoint, tags);
        expect(mockPoint.tag).toHaveBeenCalledTimes(2);
        expect(mockPoint.tag).toHaveBeenCalledWith('env', '');
        expect(mockPoint.tag).toHaveBeenCalledWith('region', 'us-east');
        expect(result).toBe(mockPoint);
    });
    test('should handle zero as value', () => {
        const tags = [{ name: 'count', value: 0 }];
        const result = applyInfluxTags(mockPoint, tags);
        expect(mockPoint.tag).toHaveBeenCalledWith('count', '0');
        expect(result).toBe(mockPoint);
    });
    test('should handle false as value', () => {
        const tags = [{ name: 'enabled', value: false }];
        const result = applyInfluxTags(mockPoint, tags);
        expect(mockPoint.tag).toHaveBeenCalledWith('enabled', 'false');
        expect(result).toBe(mockPoint);
    });
    // Non-array inputs (strings, plain objects) are treated as "no tags".
    test('should handle non-array input', () => {
        const result = applyInfluxTags(mockPoint, 'not-an-array');
        expect(mockPoint.tag).not.toHaveBeenCalled();
        expect(result).toBe(mockPoint);
    });
    test('should handle object instead of array', () => {
        const result = applyInfluxTags(mockPoint, { name: 'env', value: 'prod' });
        expect(mockPoint.tag).not.toHaveBeenCalled();
        expect(result).toBe(mockPoint);
    });
    // Returning the point enables fluent chaining at the call site.
    test('should support method chaining', () => {
        const tags = [
            { name: 'env', value: 'prod' },
            { name: 'region', value: 'us-east' },
        ];
        const result = applyInfluxTags(mockPoint, tags);
        // The function returns the point for chaining
        expect(result).toBe(mockPoint);
        expect(typeof result.tag).toBe('function');
    });
});

View File

@@ -58,68 +58,6 @@ describe('InfluxDB v3 Shared Utils', () => {
});
});
// Tests for the useRefactoredInfluxDb feature-flag helper.
// NOTE(review): `globals` and `utils` are set up in the enclosing describe block,
// above this excerpt.
describe('useRefactoredInfluxDb', () => {
    // v3 has no legacy code path, so the flag value must be ignored.
    test('should always return true for InfluxDB v3 (legacy code removed)', () => {
        globals.config.get.mockImplementation((key) => {
            if (key === 'Butler-SOS.influxdbConfig.version') return 3;
            if (key === 'Butler-SOS.influxdbConfig.useRefactoredCode') return false;
            return undefined;
        });
        const result = utils.useRefactoredInfluxDb();
        expect(result).toBe(true);
    });
    // Flag explicitly true => refactored path for v1/v2.
    test('should return true when feature flag is enabled for v1/v2', () => {
        globals.config.get.mockImplementation((key) => {
            if (key === 'Butler-SOS.influxdbConfig.version') return 1;
            if (key === 'Butler-SOS.influxdbConfig.useRefactoredCode') return true;
            return undefined;
        });
        const result = utils.useRefactoredInfluxDb();
        expect(result).toBe(true);
    });
    // Flag explicitly false => legacy path for v2.
    test('should return false when feature flag is disabled for v1/v2', () => {
        globals.config.get.mockImplementation((key) => {
            if (key === 'Butler-SOS.influxdbConfig.version') return 2;
            if (key === 'Butler-SOS.influxdbConfig.useRefactoredCode') return false;
            return undefined;
        });
        const result = utils.useRefactoredInfluxDb();
        expect(result).toBe(false);
    });
    // v1 also has no legacy code path, so an unset flag still yields true.
    test('should return true for v1 even when feature flag is undefined (v1 always uses refactored code)', () => {
        globals.config.get.mockImplementation((key) => {
            if (key === 'Butler-SOS.influxdbConfig.version') return 1;
            if (key === 'Butler-SOS.influxdbConfig.useRefactoredCode') return undefined;
            return undefined;
        });
        const result = utils.useRefactoredInfluxDb();
        expect(result).toBe(true);
    });
    // v2 defaults to legacy when the flag is unset (strict === true check).
    test('should return false when feature flag is undefined for v2', () => {
        globals.config.get.mockImplementation((key) => {
            if (key === 'Butler-SOS.influxdbConfig.version') return 2;
            if (key === 'Butler-SOS.influxdbConfig.useRefactoredCode') return undefined;
            return undefined;
        });
        const result = utils.useRefactoredInfluxDb();
        expect(result).toBe(false);
    });
});
describe('isInfluxDbEnabled', () => {
test('should return true when client exists', () => {
globals.influx = { write: jest.fn() };

View File

@@ -1,5 +1,5 @@
import globals from '../../globals.js';
import { getInfluxDbVersion, useRefactoredInfluxDb } from './shared/utils.js';
import { getInfluxDbVersion } from './shared/utils.js';
// Import version-specific implementations
import { storeHealthMetricsV1 } from './v1/health-metrics.js';
@@ -45,7 +45,7 @@ export async function postHealthMetricsToInfluxdb(serverName, host, body, server
return storeHealthMetricsV1(serverTags, body);
}
if (version === 2) {
return storeHealthMetricsV2(serverName, host, body);
return storeHealthMetricsV2(serverName, host, body, serverTags);
}
if (version === 3) {
return postHealthMetricsToInfluxdbV3(serverName, host, body, serverTags);

View File

@@ -1,14 +1,11 @@
import { useRefactoredInfluxDb, getFormattedTime } from './shared/utils.js';
import { getFormattedTime } from './shared/utils.js';
import * as factory from './factory.js';
import globals from '../../globals.js';
// Import original implementation for fallback
import * as original from '../post-to-influxdb.js';
/**
* Main facade that routes to either refactored or original implementation based on feature flag.
* Main facade that routes to version-specific implementations via factory.
*
* This allows for safe migration by testing refactored code alongside original implementation.
* All InfluxDB versions (v1, v2, v3) now use refactored modular code.
*/
/**
@@ -23,8 +20,6 @@ export { getFormattedTime };
/**
* Posts health metrics data from Qlik Sense to InfluxDB.
*
* Routes to refactored or original implementation based on feature flag.
*
* @param {string} serverName - The name of the Qlik Sense server
* @param {string} host - The hostname or IP of the Qlik Sense server
* @param {object} body - The health metrics data from Sense engine healthcheck API
@@ -32,198 +27,89 @@ export { getFormattedTime };
* @returns {Promise<void>} Promise that resolves when data has been posted to InfluxDB
*/
export async function postHealthMetricsToInfluxdb(serverName, host, body, serverTags) {
if (useRefactoredInfluxDb()) {
try {
return await factory.postHealthMetricsToInfluxdb(serverName, host, body, serverTags);
} catch (err) {
// If refactored code not yet implemented for this version, fall back to original
return await original.postHealthMetricsToInfluxdb(serverName, host, body, serverTags);
}
}
return await original.postHealthMetricsToInfluxdb(serverName, host, body, serverTags);
return await factory.postHealthMetricsToInfluxdb(serverName, host, body, serverTags);
}
/**
* Posts proxy sessions data to InfluxDB.
*
* Routes to refactored or original implementation based on feature flag.
*
* @param {object} userSessions - User session data
* @returns {Promise<void>}
*/
export async function postProxySessionsToInfluxdb(userSessions) {
if (useRefactoredInfluxDb()) {
try {
return await factory.postProxySessionsToInfluxdb(userSessions);
} catch (err) {
// If refactored code not yet implemented for this version, fall back to original
return await original.postProxySessionsToInfluxdb(userSessions);
}
}
return await original.postProxySessionsToInfluxdb(userSessions);
return await factory.postProxySessionsToInfluxdb(userSessions);
}
/**
* Posts Butler SOS's own memory usage to InfluxDB.
*
* Routes to refactored or original implementation based on feature flag.
*
* @param {object} memory - Memory usage data object
* @returns {Promise<void>}
*/
export async function postButlerSOSMemoryUsageToInfluxdb(memory) {
if (useRefactoredInfluxDb()) {
try {
return await factory.postButlerSOSMemoryUsageToInfluxdb(memory);
} catch (err) {
// If refactored code not yet implemented for this version, fall back to original
return await original.postButlerSOSMemoryUsageToInfluxdb(memory);
}
}
return await original.postButlerSOSMemoryUsageToInfluxdb(memory);
return await factory.postButlerSOSMemoryUsageToInfluxdb(memory);
}
/**
* Posts user events to InfluxDB.
*
* Routes to refactored or original implementation based on feature flag.
*
* @param {object} msg - The user event message
* @returns {Promise<void>}
*/
export async function postUserEventToInfluxdb(msg) {
if (useRefactoredInfluxDb()) {
try {
return await factory.postUserEventToInfluxdb(msg);
} catch (err) {
// If refactored code not yet implemented for this version, fall back to original globals.logger.error(`INFLUXDB ROUTING: User event - falling back to legacy code due to error: ${err.message}`);
globals.logger.debug(`INFLUXDB ROUTING: User event - error stack: ${err.stack}`);
return await original.postUserEventToInfluxdb(msg);
}
}
return await original.postUserEventToInfluxdb(msg);
return await factory.postUserEventToInfluxdb(msg);
}
/**
* Posts log events to InfluxDB.
*
* Routes to refactored or original implementation based on feature flag.
*
* @param {object} msg - The log event message
* @returns {Promise<void>}
*/
export async function postLogEventToInfluxdb(msg) {
if (useRefactoredInfluxDb()) {
try {
return await factory.postLogEventToInfluxdb(msg);
} catch (err) {
// If refactored code not yet implemented for this version, fall back to original globals.logger.error(`INFLUXDB ROUTING: Log event - falling back to legacy code due to error: ${err.message}`);
globals.logger.debug(`INFLUXDB ROUTING: Log event - error stack: ${err.stack}`);
return await original.postLogEventToInfluxdb(msg);
}
}
return await original.postLogEventToInfluxdb(msg);
return await factory.postLogEventToInfluxdb(msg);
}
/**
* Stores event counts to InfluxDB.
*
* Routes to refactored or original implementation based on feature flag.
*
* @param {string} eventsSinceMidnight - Events since midnight data
* @param {string} eventsLastHour - Events last hour data
* @param {string} eventsSinceMidnight - Events since midnight data (unused, kept for compatibility)
* @param {string} eventsLastHour - Events last hour data (unused, kept for compatibility)
* @returns {Promise<void>}
*/
export async function storeEventCountInfluxDB(eventsSinceMidnight, eventsLastHour) {
if (useRefactoredInfluxDb()) {
try {
return await factory.storeEventCountInfluxDB();
} catch (err) {
// If refactored code not yet implemented for this version, fall back to original
return await original.storeEventCountInfluxDB(eventsSinceMidnight, eventsLastHour);
}
}
return await original.storeEventCountInfluxDB(eventsSinceMidnight, eventsLastHour);
return await factory.storeEventCountInfluxDB();
}
/**
* Stores rejected event counts to InfluxDB.
*
* Routes to refactored or original implementation based on feature flag.
*
* @param {object} rejectedSinceMidnight - Rejected events since midnight
* @param {object} rejectedLastHour - Rejected events last hour
* @param {object} rejectedSinceMidnight - Rejected events since midnight (unused, kept for compatibility)
* @param {object} rejectedLastHour - Rejected events last hour (unused, kept for compatibility)
* @returns {Promise<void>}
*/
export async function storeRejectedEventCountInfluxDB(rejectedSinceMidnight, rejectedLastHour) {
if (useRefactoredInfluxDb()) {
try {
return await factory.storeRejectedEventCountInfluxDB();
} catch (err) {
// If refactored code not yet implemented for this version, fall back to original
return await original.storeRejectedEventCountInfluxDB(
rejectedSinceMidnight,
rejectedLastHour
);
}
}
return await original.storeRejectedEventCountInfluxDB(rejectedSinceMidnight, rejectedLastHour);
return await factory.storeRejectedEventCountInfluxDB();
}
/**
* Stores user event queue metrics to InfluxDB.
*
* Routes to refactored or original implementation based on feature flag.
*
* @param {object} queueMetrics - Queue metrics data
* @param {object} queueMetrics - Queue metrics data (unused, kept for compatibility)
* @returns {Promise<void>}
*/
export async function postUserEventQueueMetricsToInfluxdb(queueMetrics) {
if (useRefactoredInfluxDb()) {
try {
return await factory.postUserEventQueueMetricsToInfluxdb();
} catch (err) {
// If refactored code not yet implemented for this version, fall back to original
globals.logger.error(
`INFLUXDB ROUTING: User event queue metrics - falling back to legacy code due to error: ${err.message}`
);
globals.logger.debug(
`INFLUXDB ROUTING: User event queue metrics - error stack: ${err.stack}`
);
return await original.postUserEventQueueMetricsToInfluxdb(queueMetrics);
}
}
globals.logger.verbose(
'INFLUXDB ROUTING: User event queue metrics - using original implementation'
);
return await original.postUserEventQueueMetricsToInfluxdb(queueMetrics);
return await factory.postUserEventQueueMetricsToInfluxdb();
}
/**
* Stores log event queue metrics to InfluxDB.
*
* Routes to refactored or original implementation based on feature flag.
*
* @param {object} queueMetrics - Queue metrics data
* @param {object} queueMetrics - Queue metrics data (unused, kept for compatibility)
* @returns {Promise<void>}
*/
export async function postLogEventQueueMetricsToInfluxdb(queueMetrics) {
if (useRefactoredInfluxDb()) {
try {
return await factory.postLogEventQueueMetricsToInfluxdb();
} catch (err) {
// If refactored code not yet implemented for this version, fall back to original
globals.logger.error(
`INFLUXDB ROUTING: Log event queue metrics - falling back to legacy code due to error: ${err.message}`
);
globals.logger.debug(
`INFLUXDB ROUTING: Log event queue metrics - error stack: ${err.stack}`
);
return await original.postLogEventQueueMetricsToInfluxdb(queueMetrics);
}
}
return await original.postLogEventQueueMetricsToInfluxdb(queueMetrics);
return await factory.postLogEventQueueMetricsToInfluxdb();
}
/**

View File

@@ -156,29 +156,6 @@ export function getInfluxDbVersion() {
return globals.config.get('Butler-SOS.influxdbConfig.version');
}
/**
 * Determines whether the refactored (modular) InfluxDB code path should be used.
 *
 * v1 and v3 no longer have legacy implementations, so they unconditionally use
 * the refactored path. v2 is gated behind the
 * `Butler-SOS.influxdbConfig.useRefactoredCode` feature flag; anything other
 * than a strict boolean `true` (including an unset flag) selects the legacy
 * path, preserving backward compatibility.
 *
 * @returns {boolean} True if the refactored code path should be used
 */
export function useRefactoredInfluxDb() {
    switch (getInfluxDbVersion()) {
        // Legacy implementations for v1 and v3 have been removed entirely
        case 1:
        case 3:
            return true;
        // v2: opt-in via feature flag (strict equality — defaults to legacy)
        default:
            return globals.config.get('Butler-SOS.influxdbConfig.useRefactoredCode') === true;
    }
}
/**
* Applies tags from a tags object to an InfluxDB Point3 object.
* This is needed for v3 as it doesn't have automatic default tags like v2.

View File

@@ -1,56 +1,79 @@
import { Point } from '@influxdata/influxdb-client';
import globals from '../../../globals.js';
import { isInfluxDbEnabled, writeToInfluxWithRetry } from '../shared/utils.js';
/**
* Store Butler SOS memory usage to InfluxDB v2
* Posts Butler SOS memory usage metrics to InfluxDB v2.
*
* @param {object} memory - Memory usage data
* @returns {Promise<void>}
* This function captures memory usage metrics from the Butler SOS process itself
* and stores them in InfluxDB v2.
*
* @param {object} memory - Memory usage data object
* @param {string} memory.instanceTag - Instance identifier tag
* @param {number} memory.heapUsedMByte - Heap used in MB
* @param {number} memory.heapTotalMByte - Total heap size in MB
* @param {number} memory.externalMemoryMByte - External memory usage in MB
* @param {number} memory.processMemoryMByte - Process memory usage in MB
* @returns {Promise<void>} Promise that resolves when data has been posted to InfluxDB
*/
export async function storeButlerMemoryV2(memory) {
try {
const butlerVersion = globals.appVersion;
globals.logger.debug(`MEMORY USAGE V2: Memory usage ${JSON.stringify(memory, null, 2)}`);
// Create write API with options
const writeOptions = {
flushInterval: 5000,
maxRetries: 2,
};
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', writeOptions);
if (!writeApi) {
globals.logger.warn('MEMORY USAGE V2: Influxdb write API object not found');
return;
}
// Create point using v2 Point class
const point = new Point('butlersos_memory_usage')
.tag('butler_sos_instance', memory.instanceTag)
.tag('version', butlerVersion)
.floatField('heap_used', memory.heapUsedMByte)
.floatField('heap_total', memory.heapTotalMByte)
.floatField('external', memory.externalMemoryMByte)
.floatField('process_memory', memory.processMemoryMByte);
globals.logger.silly(
`MEMORY USAGE V2: Influxdb datapoint for Butler SOS memory usage: ${JSON.stringify(
point,
null,
2
)}`
);
await writeApi.writePoint(point);
globals.logger.verbose('MEMORY USAGE V2: Sent Butler SOS memory usage data to InfluxDB');
} catch (err) {
globals.logger.error(
`MEMORY USAGE V2: Error saving Butler SOS memory data: ${globals.getErrorMessage(err)}`
);
throw err;
// Check if InfluxDB v2 is enabled
if (!isInfluxDbEnabled()) {
return;
}
// Validate input
if (!memory || typeof memory !== 'object') {
globals.logger.warn('MEMORY USAGE V2: Invalid memory data provided');
return;
}
const butlerVersion = globals.appVersion;
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
// Create point using v2 Point class
const point = new Point('butlersos_memory_usage')
.tag('butler_sos_instance', memory.instanceTag)
.tag('version', butlerVersion)
.floatField('heap_used', memory.heapUsedMByte)
.floatField('heap_total', memory.heapTotalMByte)
.floatField('external', memory.externalMemoryMByte)
.floatField('process_memory', memory.processMemoryMByte);
globals.logger.silly(
`MEMORY USAGE V2: Influxdb datapoint for Butler SOS memory usage: ${JSON.stringify(
point,
null,
2
)}`
);
// Write to InfluxDB with retry logic
await writeToInfluxWithRetry(
async () => {
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', {
flushInterval: 5000,
maxRetries: 0,
});
try {
await writeApi.writePoint(point);
await writeApi.close();
} catch (err) {
try {
await writeApi.close();
} catch (closeErr) {
// Ignore close errors
}
throw err;
}
},
'Memory usage metrics',
'v2',
''
);
globals.logger.verbose('MEMORY USAGE V2: Sent Butler SOS memory usage data to InfluxDB');
}

View File

@@ -1,217 +1,206 @@
import { Point } from '@influxdata/influxdb-client';
import globals from '../../../globals.js';
import { logError } from '../../log-error.js';
import { isInfluxDbEnabled, writeToInfluxWithRetry } from '../shared/utils.js';
import { applyInfluxTags } from './utils.js';
/**
* Store event counts to InfluxDB v2
* Posts event counts to InfluxDB v2.
*
* @description
* This function reads arrays of log and user events from the `udpEvents` object,
* and stores the data in InfluxDB v2. The data is written to a measurement named after
* the `Butler-SOS.qlikSenseEvents.eventCount.influxdb.measurementName` config setting.
*
* Aggregates and stores counts for log and user events
*
* @returns {Promise<void>}
* @returns {Promise<void>} Promise that resolves when data has been posted to InfluxDB
* @throws {Error} Error if unable to write data to InfluxDB
*/
export async function storeEventCountV2() {
try {
// Get array of log events
const logEvents = await globals.udpEvents.getLogEvents();
const userEvents = await globals.udpEvents.getUserEvents();
globals.logger.debug('EVENT COUNT V2: Starting to store event counts');
globals.logger.debug(`EVENT COUNT V2: Log events: ${JSON.stringify(logEvents, null, 2)}`);
globals.logger.debug(`EVENT COUNT V2: User events: ${JSON.stringify(userEvents, null, 2)}`);
// Are there any events to store?
if (logEvents.length === 0 && userEvents.length === 0) {
globals.logger.verbose('EVENT COUNT V2: No events to store in InfluxDB');
return;
}
// Create write API with options
const writeOptions = {
flushInterval: 5000,
maxRetries: 2,
};
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', writeOptions);
if (!writeApi) {
globals.logger.warn('EVENT COUNT V2: Influxdb write API object not found');
return;
}
const points = [];
// Get measurement name to use for event counts
const measurementName = globals.config.get(
'Butler-SOS.qlikSenseEvents.eventCount.influxdb.measurementName'
);
// Loop through data in log events and create datapoints
for (const event of logEvents) {
const point = new Point(measurementName)
.tag('event_type', 'log')
.tag('source', event.source)
.tag('host', event.host)
.tag('subsystem', event.subsystem)
.intField('counter', event.counter);
// Add static tags from config file
if (
globals.config.has('Butler-SOS.qlikSenseEvents.eventCount.influxdb.tags') &&
globals.config.get('Butler-SOS.qlikSenseEvents.eventCount.influxdb.tags') !==
null &&
globals.config.get('Butler-SOS.qlikSenseEvents.eventCount.influxdb.tags').length > 0
) {
const configTags = globals.config.get(
'Butler-SOS.qlikSenseEvents.eventCount.influxdb.tags'
);
for (const item of configTags) {
point.tag(item.name, item.value);
}
}
points.push(point);
}
// Loop through data in user events and create datapoints
for (const event of userEvents) {
const point = new Point(measurementName)
.tag('event_type', 'user')
.tag('source', event.source)
.tag('host', event.host)
.tag('subsystem', event.subsystem)
.intField('counter', event.counter);
// Add static tags from config file
if (
globals.config.has('Butler-SOS.qlikSenseEvents.eventCount.influxdb.tags') &&
globals.config.get('Butler-SOS.qlikSenseEvents.eventCount.influxdb.tags') !==
null &&
globals.config.get('Butler-SOS.qlikSenseEvents.eventCount.influxdb.tags').length > 0
) {
const configTags = globals.config.get(
'Butler-SOS.qlikSenseEvents.eventCount.influxdb.tags'
);
for (const item of configTags) {
point.tag(item.name, item.value);
}
}
points.push(point);
}
await writeApi.writePoints(points);
globals.logger.verbose('EVENT COUNT V2: Sent event count data to InfluxDB');
} catch (err) {
logError('EVENT COUNT V2: Error saving data', err);
throw err;
// Check if InfluxDB v2 is enabled
if (!isInfluxDbEnabled()) {
return;
}
// Get array of log events
const logEvents = await globals.udpEvents.getLogEvents();
const userEvents = await globals.udpEvents.getUserEvents();
globals.logger.debug(`EVENT COUNT V2: Log events: ${JSON.stringify(logEvents, null, 2)}`);
globals.logger.debug(`EVENT COUNT V2: User events: ${JSON.stringify(userEvents, null, 2)}`);
// Are there any events to store?
if (logEvents.length === 0 && userEvents.length === 0) {
globals.logger.verbose('EVENT COUNT V2: No events to store in InfluxDB');
return;
}
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
const measurementName = globals.config.get(
'Butler-SOS.qlikSenseEvents.eventCount.influxdb.measurementName'
);
const configTags = globals.config.get('Butler-SOS.qlikSenseEvents.eventCount.influxdb.tags');
const points = [];
// Loop through data in log events and create datapoints
for (const event of logEvents) {
const point = new Point(measurementName)
.tag('event_type', 'log')
.tag('source', event.source)
.tag('host', event.host)
.tag('subsystem', event.subsystem)
.intField('counter', event.counter);
// Add static tags from config file
applyInfluxTags(point, configTags);
points.push(point);
}
// Loop through data in user events and create datapoints
for (const event of userEvents) {
const point = new Point(measurementName)
.tag('event_type', 'user')
.tag('source', event.source)
.tag('host', event.host)
.tag('subsystem', event.subsystem)
.intField('counter', event.counter);
// Add static tags from config file
applyInfluxTags(point, configTags);
points.push(point);
}
// Write to InfluxDB with retry logic
await writeToInfluxWithRetry(
async () => {
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', {
flushInterval: 5000,
maxRetries: 0,
});
try {
await writeApi.writePoints(points);
await writeApi.close();
} catch (err) {
try {
await writeApi.close();
} catch (closeErr) {
// Ignore close errors
}
throw err;
}
},
'Event count metrics',
'v2',
''
);
globals.logger.verbose('EVENT COUNT V2: Sent event count data to InfluxDB');
}
/**
* Store rejected event counts to InfluxDB v2
* Tracks events that were rejected due to validation failures or rate limiting
* Posts rejected event counts to InfluxDB v2.
*
* @returns {Promise<void>}
* @description
* Tracks events that were rejected by Butler SOS due to validation failures,
* rate limiting, or filtering rules. Helps monitor data quality and filtering effectiveness.
*
* @returns {Promise<void>} Promise that resolves when data has been posted to InfluxDB
* @throws {Error} Error if unable to write data to InfluxDB
*/
export async function storeRejectedEventCountV2() {
try {
// Get array of rejected log events
const rejectedLogEvents = await globals.rejectedEvents.getRejectedLogEvents();
globals.logger.debug('REJECTED EVENT COUNT V2: Starting to store rejected event counts');
globals.logger.debug(
`REJECTED EVENT COUNT V2: Rejected log events: ${JSON.stringify(
rejectedLogEvents,
null,
2
)}`
);
// Are there any events to store?
if (rejectedLogEvents.length === 0) {
globals.logger.verbose('REJECTED EVENT COUNT V2: No events to store in InfluxDB');
return;
}
// Create write API with options
const writeOptions = {
flushInterval: 5000,
maxRetries: 2,
};
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', writeOptions);
if (!writeApi) {
globals.logger.warn('REJECTED EVENT COUNT V2: Influxdb write API object not found');
return;
}
const points = [];
// Get measurement name to use for rejected events
const measurementName = globals.config.get(
'Butler-SOS.qlikSenseEvents.rejectedEventCount.influxdb.measurementName'
);
// Loop through data in rejected log events and create datapoints
for (const event of rejectedLogEvents) {
if (event.source === 'qseow-qix-perf') {
// For qix-perf events, include app info and performance metrics
let point = new Point(measurementName)
.tag('source', event.source)
.tag('app_id', event.appId)
.tag('method', event.method)
.tag('object_type', event.objectType)
.intField('counter', event.counter)
.floatField('process_time', event.processTime);
if (event?.appName?.length > 0) {
point.tag('app_name', event.appName).tag('app_name_set', 'true');
} else {
point.tag('app_name_set', 'false');
}
// Add static tags from config file
if (
globals.config.has(
'Butler-SOS.logEvents.enginePerformanceMonitor.trackRejectedEvents.tags'
) &&
globals.config.get(
'Butler-SOS.logEvents.enginePerformanceMonitor.trackRejectedEvents.tags'
) !== null &&
globals.config.get(
'Butler-SOS.logEvents.enginePerformanceMonitor.trackRejectedEvents.tags'
).length > 0
) {
const configTags = globals.config.get(
'Butler-SOS.logEvents.enginePerformanceMonitor.trackRejectedEvents.tags'
);
for (const item of configTags) {
point.tag(item.name, item.value);
}
}
points.push(point);
} else {
const point = new Point(measurementName)
.tag('source', event.source)
.intField('counter', event.counter);
points.push(point);
}
}
await writeApi.writePoints(points);
globals.logger.verbose(
'REJECTED EVENT COUNT V2: Sent rejected event count data to InfluxDB'
);
} catch (err) {
logError('REJECTED EVENT COUNT V2: Error saving data', err);
throw err;
// Check if InfluxDB v2 is enabled
if (!isInfluxDbEnabled()) {
return;
}
// Get array of rejected log events
const rejectedLogEvents = await globals.rejectedEvents.getRejectedLogEvents();
globals.logger.debug(
`REJECTED EVENT COUNT V2: Rejected log events: ${JSON.stringify(
rejectedLogEvents,
null,
2
)}`
);
// Are there any events to store?
if (rejectedLogEvents.length === 0) {
globals.logger.verbose('REJECTED EVENT COUNT V2: No events to store in InfluxDB');
return;
}
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
const measurementName = globals.config.get(
'Butler-SOS.qlikSenseEvents.rejectedEventCount.influxdb.measurementName'
);
const points = [];
// Loop through data in rejected log events and create datapoints
for (const event of rejectedLogEvents) {
if (event.source === 'qseow-qix-perf') {
// For qix-perf events, include app info and performance metrics
const point = new Point(measurementName)
.tag('source', event.source)
.tag('app_id', event.appId)
.tag('method', event.method)
.tag('object_type', event.objectType)
.intField('counter', event.counter)
.floatField('process_time', event.processTime);
if (event?.appName?.length > 0) {
point.tag('app_name', event.appName).tag('app_name_set', 'true');
} else {
point.tag('app_name_set', 'false');
}
// Add static tags from config file
const perfMonitorTags = globals.config.get(
'Butler-SOS.logEvents.enginePerformanceMonitor.trackRejectedEvents.tags'
);
applyInfluxTags(point, perfMonitorTags);
points.push(point);
} else {
const point = new Point(measurementName)
.tag('source', event.source)
.intField('counter', event.counter);
points.push(point);
}
}
// Write to InfluxDB with retry logic
await writeToInfluxWithRetry(
async () => {
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', {
flushInterval: 5000,
maxRetries: 0,
});
try {
await writeApi.writePoints(points);
await writeApi.close();
} catch (err) {
try {
await writeApi.close();
} catch (closeErr) {
// Ignore close errors
}
throw err;
}
},
'Rejected event count metrics',
'v2',
''
);
globals.logger.verbose('REJECTED EVENT COUNT V2: Sent rejected event count data to InfluxDB');
}

View File

@@ -1,151 +1,191 @@
import { Point } from '@influxdata/influxdb-client';
import globals from '../../../globals.js';
import { getFormattedTime, processAppDocuments } from '../shared/utils.js';
import {
getFormattedTime,
processAppDocuments,
isInfluxDbEnabled,
writeToInfluxWithRetry,
} from '../shared/utils.js';
/**
* Store health metrics from multiple Sense engines to InfluxDB v2
* Posts health metrics data from Qlik Sense to InfluxDB v2.
*
* This function processes health data from the Sense engine's healthcheck API and
* formats it for storage in InfluxDB v2. It handles various metrics including:
* - CPU usage
* - Memory usage (committed, allocated, free)
* - Cache metrics (hits, lookups, additions, replacements)
* - Active/loaded/in-memory apps
* - Session counts (active, total)
* - User counts (active, total)
* - Server version and uptime
*
* @param {string} serverName - The name of the Qlik Sense server
* @param {string} host - The hostname or IP of the Qlik Sense server
* @param {object} body - Health metrics data from Sense engine
* @param {object} serverTags - Server-specific tags to add to datapoints
* @returns {Promise<void>}
*/
export async function storeHealthMetricsV2(serverName, host, body) {
try {
// Find writeApi for the server specified by serverName
const writeApi = globals.influxWriteApi.find(
(element) => element.serverName === serverName
);
export async function storeHealthMetricsV2(serverName, host, body, serverTags) {
globals.logger.debug(`HEALTH METRICS V2: Health data: ${JSON.stringify(body, null, 2)}`);
if (!writeApi) {
globals.logger.warn(
`HEALTH METRICS V2: Influxdb write API object not found for host ${host}`
);
return;
}
// Process app names for different document types
const [appNamesActive, sessionAppNamesActive] = await processAppDocuments(
body.apps.active_docs
);
const [appNamesLoaded, sessionAppNamesLoaded] = await processAppDocuments(
body.apps.loaded_docs
);
const [appNamesInMemory, sessionAppNamesInMemory] = await processAppDocuments(
body.apps.in_memory_docs
);
const formattedTime = getFormattedTime(body.started);
// Create points using v2 Point class
const points = [
new Point('sense_server')
.stringField('version', body.version)
.stringField('started', body.started)
.stringField('uptime', formattedTime),
new Point('mem')
.floatField('comitted', body.mem.committed)
.floatField('allocated', body.mem.allocated)
.floatField('free', body.mem.free),
new Point('apps')
.intField('active_docs_count', body.apps.active_docs.length)
.intField('loaded_docs_count', body.apps.loaded_docs.length)
.intField('in_memory_docs_count', body.apps.in_memory_docs.length)
.stringField(
'active_docs',
globals.config.get('Butler-SOS.influxdbConfig.includeFields.activeDocs')
? body.apps.active_docs
: ''
)
.stringField(
'active_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.activeDocs')
? appNamesActive.toString()
: ''
)
.stringField(
'active_session_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.activeDocs')
? sessionAppNamesActive.toString()
: ''
)
.stringField(
'loaded_docs',
globals.config.get('Butler-SOS.influxdbConfig.includeFields.loadedDocs')
? body.apps.loaded_docs
: ''
)
.stringField(
'loaded_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.loadedDocs')
? appNamesLoaded.toString()
: ''
)
.stringField(
'loaded_session_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.loadedDocs')
? sessionAppNamesLoaded.toString()
: ''
)
.stringField(
'in_memory_docs',
globals.config.get('Butler-SOS.influxdbConfig.includeFields.inMemoryDocs')
? body.apps.in_memory_docs
: ''
)
.stringField(
'in_memory_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.inMemoryDocs')
? appNamesInMemory.toString()
: ''
)
.stringField(
'in_memory_session_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.inMemoryDocs')
? sessionAppNamesInMemory.toString()
: ''
)
.uintField('calls', body.apps.calls)
.uintField('selections', body.apps.selections),
new Point('cpu').floatField('total', body.cpu.total),
new Point('session')
.uintField('active', body.session.active)
.uintField('total', body.session.total),
new Point('users')
.uintField('active', body.users.active)
.uintField('total', body.users.total),
new Point('cache')
.uintField('hits', body.cache.hits)
.uintField('lookups', body.cache.lookups)
.intField('added', body.cache.added)
.intField('replaced', body.cache.replaced)
.intField('bytes_added', body.cache.bytes_added),
new Point('saturated').booleanField('saturated', body.saturated),
];
await writeApi.writeAPI.writePoints(points);
globals.logger.verbose(`HEALTH METRICS V2: Stored health data from server: ${serverName}`);
} catch (err) {
// Track error count
await globals.errorTracker.incrementError('INFLUXDB_V2_WRITE', serverName);
globals.logger.error(
`HEALTH METRICS V2: Error saving health data: ${globals.getErrorMessage(err)}`
);
throw err;
// Check if InfluxDB v2 is enabled
if (!isInfluxDbEnabled()) {
return;
}
// Validate input
if (!body || typeof body !== 'object') {
globals.logger.warn(`HEALTH METRICS V2: Invalid health data from server ${serverName}`);
return;
}
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
// Process app names for different document types
const { appNames: appNamesActive, sessionAppNames: sessionAppNamesActive } =
await processAppDocuments(body.apps.active_docs, 'HEALTH METRICS', 'active');
const { appNames: appNamesLoaded, sessionAppNames: sessionAppNamesLoaded } =
await processAppDocuments(body.apps.loaded_docs, 'HEALTH METRICS', 'loaded');
const { appNames: appNamesInMemory, sessionAppNames: sessionAppNamesInMemory } =
await processAppDocuments(body.apps.in_memory_docs, 'HEALTH METRICS', 'in memory');
const formattedTime = getFormattedTime(body.started);
// Create points using v2 Point class
const points = [
new Point('sense_server')
.stringField('version', body.version)
.stringField('started', body.started)
.stringField('uptime', formattedTime),
new Point('mem')
.floatField('comitted', body.mem.committed)
.floatField('allocated', body.mem.allocated)
.floatField('free', body.mem.free),
new Point('apps')
.intField('active_docs_count', body.apps.active_docs.length)
.intField('loaded_docs_count', body.apps.loaded_docs.length)
.intField('in_memory_docs_count', body.apps.in_memory_docs.length)
.stringField(
'active_docs',
globals.config.get('Butler-SOS.influxdbConfig.includeFields.activeDocs')
? body.apps.active_docs
: ''
)
.stringField(
'active_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.activeDocs')
? appNamesActive.toString()
: ''
)
.stringField(
'active_session_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.activeDocs')
? sessionAppNamesActive.toString()
: ''
)
.stringField(
'loaded_docs',
globals.config.get('Butler-SOS.influxdbConfig.includeFields.loadedDocs')
? body.apps.loaded_docs
: ''
)
.stringField(
'loaded_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.loadedDocs')
? appNamesLoaded.toString()
: ''
)
.stringField(
'loaded_session_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.loadedDocs')
? sessionAppNamesLoaded.toString()
: ''
)
.stringField(
'in_memory_docs',
globals.config.get('Butler-SOS.influxdbConfig.includeFields.inMemoryDocs')
? body.apps.in_memory_docs
: ''
)
.stringField(
'in_memory_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.inMemoryDocs')
? appNamesInMemory.toString()
: ''
)
.stringField(
'in_memory_session_docs_names',
globals.config.get('Butler-SOS.appNames.enableAppNameExtract') &&
globals.config.get('Butler-SOS.influxdbConfig.includeFields.inMemoryDocs')
? sessionAppNamesInMemory.toString()
: ''
)
.uintField('calls', body.apps.calls)
.uintField('selections', body.apps.selections),
new Point('cpu').floatField('total', body.cpu.total),
new Point('session')
.uintField('active', body.session.active)
.uintField('total', body.session.total),
new Point('users')
.uintField('active', body.users.active)
.uintField('total', body.users.total),
new Point('cache')
.uintField('hits', body.cache.hits)
.uintField('lookups', body.cache.lookups)
.intField('added', body.cache.added)
.intField('replaced', body.cache.replaced)
.intField('bytes_added', body.cache.bytes_added),
new Point('saturated').booleanField('saturated', body.saturated),
];
// Add server tags to all points
if (serverTags && typeof serverTags === 'object') {
for (const point of points) {
for (const [key, value] of Object.entries(serverTags)) {
if (value !== undefined && value !== null) {
point.tag(key, String(value));
}
}
}
}
// Write all points to InfluxDB with retry logic
await writeToInfluxWithRetry(
async () => {
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', {
flushInterval: 5000,
maxRetries: 0,
});
try {
await writeApi.writePoints(points);
await writeApi.close();
} catch (err) {
try {
await writeApi.close();
} catch (closeErr) {
// Ignore close errors
}
throw err;
}
},
`Health metrics from ${serverName}`,
'v2',
serverName
);
globals.logger.verbose(`HEALTH METRICS V2: Stored health data from server: ${serverName}`);
}

View File

@@ -1,197 +1,243 @@
import { Point } from '@influxdata/influxdb-client';
import globals from '../../../globals.js';
import { isInfluxDbEnabled, writeToInfluxWithRetry } from '../shared/utils.js';
import { applyInfluxTags } from './utils.js';
/**
* Store log event to InfluxDB v2
* Handles log events from different Sense sources
*
* @param {object} msg - Log event message
* @returns {Promise<void>}
* @description
* Handles log events from 5 different Qlik Sense sources:
* - qseow-engine: Engine log events
* - qseow-proxy: Proxy log events
* - qseow-scheduler: Scheduler log events
* - qseow-repository: Repository log events
* - qseow-qix-perf: QIX performance metrics
*
* Each source has specific fields and tags that are written to InfluxDB.
* Note: Uses _field suffix for fields that conflict with tag names (e.g., result_code_field).
*
* @param {object} msg - Log event message containing the following properties:
* @param {string} msg.host - Hostname of the Qlik Sense server
* @param {string} msg.source - Event source (qseow-engine, qseow-proxy, qseow-scheduler, qseow-repository, qseow-qix-perf)
* @param {string} msg.level - Log level (e.g., INFO, WARN, ERROR)
* @param {string} msg.log_row - Log row identifier
* @param {string} msg.subsystem - Subsystem generating the log
* @param {string} msg.message - Log message text
* @param {string} [msg.exception_message] - Exception message if applicable
* @param {string} [msg.command] - Command being executed
* @param {string} [msg.result_code] - Result code of operation
* @param {string} [msg.origin] - Origin of the event
* @param {string} [msg.context] - Context information
* @param {string} [msg.session_id] - Session identifier
* @param {string} [msg.user_full] - Full user name
* @param {string} [msg.user_directory] - User directory
* @param {string} [msg.user_id] - User ID
* @param {string} [msg.windows_user] - Windows username
* @param {string} [msg.task_id] - Task identifier
* @param {string} [msg.task_name] - Task name
* @param {string} [msg.app_id] - Application ID
* @param {string} [msg.app_name] - Application name
* @param {string} [msg.engine_exe_version] - Engine executable version
* @param {string} [msg.execution_id] - Execution identifier (scheduler)
* @param {string} [msg.method] - QIX method (qix-perf)
* @param {string} [msg.object_type] - Object type (qix-perf)
* @param {string} [msg.proxy_session_id] - Proxy session ID (qix-perf)
* @param {string} [msg.event_activity_source] - Event activity source (qix-perf)
* @param {number} [msg.process_time] - Process time in ms (qix-perf)
* @param {number} [msg.work_time] - Work time in ms (qix-perf)
* @param {number} [msg.lock_time] - Lock time in ms (qix-perf)
* @param {number} [msg.validate_time] - Validate time in ms (qix-perf)
* @param {number} [msg.traverse_time] - Traverse time in ms (qix-perf)
* @param {string} [msg.handle] - Handle identifier (qix-perf)
* @param {number} [msg.net_ram] - Net RAM usage (qix-perf)
* @param {number} [msg.peak_ram] - Peak RAM usage (qix-perf)
* @param {string} [msg.object_id] - Object identifier (qix-perf)
* @param {Array<{name: string, value: string}>} [msg.category] - Array of category objects
* @returns {Promise<void>} Promise that resolves when data has been posted to InfluxDB
*/
export async function storeLogEventV2(msg) {
try {
globals.logger.debug(`LOG EVENT V2: ${JSON.stringify(msg)}`);
globals.logger.debug(`LOG EVENT V2: ${JSON.stringify(msg)}`);
// Check if this is a supported source
if (
msg.source !== 'qseow-engine' &&
msg.source !== 'qseow-proxy' &&
msg.source !== 'qseow-scheduler' &&
msg.source !== 'qseow-repository' &&
msg.source !== 'qseow-qix-perf'
) {
globals.logger.warn(`LOG EVENT V2: Unsupported log event source: ${msg.source}`);
return;
}
// Create write API with options
const writeOptions = {
flushInterval: 5000,
maxRetries: 2,
};
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', writeOptions);
if (!writeApi) {
globals.logger.warn('LOG EVENT V2: Influxdb write API object not found');
return;
}
let point;
// Process each source type
if (msg.source === 'qseow-engine') {
point = new Point('log_event')
.tag('host', msg.host)
.tag('level', msg.level)
.tag('source', msg.source)
.tag('log_row', msg.log_row)
.tag('subsystem', msg.subsystem)
.stringField('message', msg.message)
.stringField('exception_message', msg.exception_message)
.stringField('command', msg.command)
.stringField('result_code', msg.result_code)
.stringField('origin', msg.origin)
.stringField('context', msg.context)
.stringField('session_id', msg.session_id)
.stringField('raw_event', JSON.stringify(msg));
// Tags that are empty in some cases. Only add if they are non-empty
if (msg?.user_full?.length > 0) point.tag('user_full', msg.user_full);
if (msg?.user_directory?.length > 0) point.tag('user_directory', msg.user_directory);
if (msg?.user_id?.length > 0) point.tag('user_id', msg.user_id);
if (msg?.result_code?.length > 0) point.tag('result_code', msg.result_code);
if (msg?.windows_user?.length > 0) point.tag('windows_user', msg.windows_user);
if (msg?.task_id?.length > 0) point.tag('task_id', msg.task_id);
if (msg?.task_name?.length > 0) point.tag('task_name', msg.task_name);
if (msg?.app_id?.length > 0) point.tag('app_id', msg.app_id);
if (msg?.app_name?.length > 0) point.tag('app_name', msg.app_name);
if (msg?.engine_exe_version?.length > 0)
point.tag('engine_exe_version', msg.engine_exe_version);
} else if (msg.source === 'qseow-proxy') {
point = new Point('log_event')
.tag('host', msg.host)
.tag('level', msg.level)
.tag('source', msg.source)
.tag('log_row', msg.log_row)
.tag('subsystem', msg.subsystem)
.stringField('message', msg.message)
.stringField('exception_message', msg.exception_message)
.stringField('command', msg.command)
.stringField('result_code', msg.result_code)
.stringField('origin', msg.origin)
.stringField('context', msg.context)
.stringField('raw_event', JSON.stringify(msg));
// Tags that are empty in some cases. Only add if they are non-empty
if (msg?.user_full?.length > 0) point.tag('user_full', msg.user_full);
if (msg?.user_directory?.length > 0) point.tag('user_directory', msg.user_directory);
if (msg?.user_id?.length > 0) point.tag('user_id', msg.user_id);
if (msg?.result_code?.length > 0) point.tag('result_code', msg.result_code);
} else if (msg.source === 'qseow-scheduler') {
point = new Point('log_event')
.tag('host', msg.host)
.tag('level', msg.level)
.tag('source', msg.source)
.tag('log_row', msg.log_row)
.tag('subsystem', msg.subsystem)
.stringField('message', msg.message)
.stringField('exception_message', msg.exception_message)
.stringField('app_name', msg.app_name)
.stringField('app_id', msg.app_id)
.stringField('execution_id', msg.execution_id)
.stringField('raw_event', JSON.stringify(msg));
// Tags that are empty in some cases. Only add if they are non-empty
if (msg?.user_full?.length > 0) point.tag('user_full', msg.user_full);
if (msg?.user_directory?.length > 0) point.tag('user_directory', msg.user_directory);
if (msg?.user_id?.length > 0) point.tag('user_id', msg.user_id);
if (msg?.task_id?.length > 0) point.tag('task_id', msg.task_id);
if (msg?.task_name?.length > 0) point.tag('task_name', msg.task_name);
} else if (msg.source === 'qseow-repository') {
point = new Point('log_event')
.tag('host', msg.host)
.tag('level', msg.level)
.tag('source', msg.source)
.tag('log_row', msg.log_row)
.tag('subsystem', msg.subsystem)
.stringField('message', msg.message)
.stringField('exception_message', msg.exception_message)
.stringField('command', msg.command)
.stringField('result_code', msg.result_code)
.stringField('origin', msg.origin)
.stringField('context', msg.context)
.stringField('raw_event', JSON.stringify(msg));
// Tags that are empty in some cases. Only add if they are non-empty
if (msg?.user_full?.length > 0) point.tag('user_full', msg.user_full);
if (msg?.user_directory?.length > 0) point.tag('user_directory', msg.user_directory);
if (msg?.user_id?.length > 0) point.tag('user_id', msg.user_id);
if (msg?.result_code?.length > 0) point.tag('result_code', msg.result_code);
} else if (msg.source === 'qseow-qix-perf') {
point = new Point('log_event')
.tag('host', msg.host)
.tag('level', msg.level)
.tag('source', msg.source)
.tag('log_row', msg.log_row)
.tag('subsystem', msg.subsystem)
.tag('method', msg.method)
.tag('object_type', msg.object_type)
.tag('proxy_session_id', msg.proxy_session_id)
.tag('session_id', msg.session_id)
.tag('event_activity_source', msg.event_activity_source)
.stringField('app_id', msg.app_id)
.floatField('process_time', parseFloat(msg.process_time))
.floatField('work_time', parseFloat(msg.work_time))
.floatField('lock_time', parseFloat(msg.lock_time))
.floatField('validate_time', parseFloat(msg.validate_time))
.floatField('traverse_time', parseFloat(msg.traverse_time))
.stringField('handle', msg.handle)
.intField('net_ram', parseInt(msg.net_ram))
.intField('peak_ram', parseInt(msg.peak_ram))
.stringField('raw_event', JSON.stringify(msg));
// Tags that are empty in some cases. Only add if they are non-empty
if (msg?.user_full?.length > 0) point.tag('user_full', msg.user_full);
if (msg?.user_directory?.length > 0) point.tag('user_directory', msg.user_directory);
if (msg?.user_id?.length > 0) point.tag('user_id', msg.user_id);
if (msg?.app_id?.length > 0) point.tag('app_id', msg.app_id);
if (msg?.app_name?.length > 0) point.tag('app_name', msg.app_name);
if (msg?.object_id?.length > 0) point.tag('object_id', msg.object_id);
}
// Add log event categories to tags if available
// The msg.category array contains objects with properties 'name' and 'value'
if (msg?.category?.length > 0) {
msg.category.forEach((category) => {
point.tag(category.name, category.value);
});
}
// Add custom tags from config file to payload
if (
globals.config.has('Butler-SOS.logEvents.tags') &&
globals.config.get('Butler-SOS.logEvents.tags') !== null &&
globals.config.get('Butler-SOS.logEvents.tags').length > 0
) {
const configTags = globals.config.get('Butler-SOS.logEvents.tags');
for (const item of configTags) {
point.tag(item.name, item.value);
}
}
globals.logger.silly(`LOG EVENT V2: Influxdb datapoint: ${JSON.stringify(point, null, 2)}`);
await writeApi.writePoint(point);
globals.logger.verbose('LOG EVENT V2: Sent log event data to InfluxDB');
} catch (err) {
globals.logger.error(
`LOG EVENT V2: Error saving log event: ${globals.getErrorMessage(err)}`
);
throw err;
// Only write to InfluxDB if enabled
if (!isInfluxDbEnabled()) {
return;
}
// Validate source
if (
msg.source !== 'qseow-engine' &&
msg.source !== 'qseow-proxy' &&
msg.source !== 'qseow-scheduler' &&
msg.source !== 'qseow-repository' &&
msg.source !== 'qseow-qix-perf'
) {
globals.logger.warn(`LOG EVENT V2: Unsupported log event source: ${msg.source}`);
return;
}
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
let point;
// Process each source type
if (msg.source === 'qseow-engine') {
point = new Point('log_event')
.tag('host', msg.host)
.tag('level', msg.level)
.tag('source', msg.source)
.tag('log_row', msg.log_row)
.tag('subsystem', msg.subsystem)
.stringField('message', msg.message)
.stringField('exception_message', msg.exception_message || '')
.stringField('command', msg.command || '')
.stringField('result_code_field', msg.result_code || '')
.stringField('origin', msg.origin || '')
.stringField('context', msg.context || '')
.stringField('session_id', msg.session_id || '')
.stringField('raw_event', JSON.stringify(msg));
// Conditional tags
if (msg?.user_full?.length > 0) point.tag('user_full', msg.user_full);
if (msg?.user_directory?.length > 0) point.tag('user_directory', msg.user_directory);
if (msg?.user_id?.length > 0) point.tag('user_id', msg.user_id);
if (msg?.result_code?.length > 0) point.tag('result_code', msg.result_code);
if (msg?.windows_user?.length > 0) point.tag('windows_user', msg.windows_user);
if (msg?.task_id?.length > 0) point.tag('task_id', msg.task_id);
if (msg?.task_name?.length > 0) point.tag('task_name', msg.task_name);
if (msg?.app_id?.length > 0) point.tag('app_id', msg.app_id);
if (msg?.app_name?.length > 0) point.tag('app_name', msg.app_name);
if (msg?.engine_exe_version?.length > 0)
point.tag('engine_exe_version', msg.engine_exe_version);
} else if (msg.source === 'qseow-proxy') {
point = new Point('log_event')
.tag('host', msg.host)
.tag('level', msg.level)
.tag('source', msg.source)
.tag('log_row', msg.log_row)
.tag('subsystem', msg.subsystem)
.stringField('message', msg.message)
.stringField('exception_message', msg.exception_message || '')
.stringField('command', msg.command || '')
.stringField('result_code_field', msg.result_code || '')
.stringField('origin', msg.origin || '')
.stringField('context', msg.context || '')
.stringField('raw_event', JSON.stringify(msg));
// Conditional tags
if (msg?.user_full?.length > 0) point.tag('user_full', msg.user_full);
if (msg?.user_directory?.length > 0) point.tag('user_directory', msg.user_directory);
if (msg?.user_id?.length > 0) point.tag('user_id', msg.user_id);
if (msg?.result_code?.length > 0) point.tag('result_code', msg.result_code);
} else if (msg.source === 'qseow-scheduler') {
point = new Point('log_event')
.tag('host', msg.host)
.tag('level', msg.level)
.tag('source', msg.source)
.tag('log_row', msg.log_row)
.tag('subsystem', msg.subsystem)
.stringField('message', msg.message)
.stringField('exception_message', msg.exception_message || '')
.stringField('app_name', msg.app_name || '')
.stringField('app_id', msg.app_id || '')
.stringField('execution_id', msg.execution_id || '')
.stringField('raw_event', JSON.stringify(msg));
// Conditional tags
if (msg?.user_full?.length > 0) point.tag('user_full', msg.user_full);
if (msg?.user_directory?.length > 0) point.tag('user_directory', msg.user_directory);
if (msg?.user_id?.length > 0) point.tag('user_id', msg.user_id);
if (msg?.task_id?.length > 0) point.tag('task_id', msg.task_id);
if (msg?.task_name?.length > 0) point.tag('task_name', msg.task_name);
} else if (msg.source === 'qseow-repository') {
point = new Point('log_event')
.tag('host', msg.host)
.tag('level', msg.level)
.tag('source', msg.source)
.tag('log_row', msg.log_row)
.tag('subsystem', msg.subsystem)
.stringField('message', msg.message)
.stringField('exception_message', msg.exception_message || '')
.stringField('command', msg.command || '')
.stringField('result_code_field', msg.result_code || '')
.stringField('origin', msg.origin || '')
.stringField('context', msg.context || '')
.stringField('raw_event', JSON.stringify(msg));
// Conditional tags
if (msg?.user_full?.length > 0) point.tag('user_full', msg.user_full);
if (msg?.user_directory?.length > 0) point.tag('user_directory', msg.user_directory);
if (msg?.user_id?.length > 0) point.tag('user_id', msg.user_id);
if (msg?.result_code?.length > 0) point.tag('result_code', msg.result_code);
} else if (msg.source === 'qseow-qix-perf') {
point = new Point('log_event')
.tag('host', msg.host)
.tag('level', msg.level)
.tag('source', msg.source)
.tag('log_row', msg.log_row)
.tag('subsystem', msg.subsystem)
.tag('method', msg.method)
.tag('object_type', msg.object_type)
.tag('proxy_session_id', msg.proxy_session_id)
.tag('session_id', msg.session_id)
.tag('event_activity_source', msg.event_activity_source)
.stringField('app_id', msg.app_id || '')
.floatField('process_time', parseFloat(msg.process_time))
.floatField('work_time', parseFloat(msg.work_time))
.floatField('lock_time', parseFloat(msg.lock_time))
.floatField('validate_time', parseFloat(msg.validate_time))
.floatField('traverse_time', parseFloat(msg.traverse_time))
.stringField('handle', msg.handle || '')
.intField('net_ram', parseInt(msg.net_ram))
.intField('peak_ram', parseInt(msg.peak_ram))
.stringField('raw_event', JSON.stringify(msg));
// Conditional tags
if (msg?.user_full?.length > 0) point.tag('user_full', msg.user_full);
if (msg?.user_directory?.length > 0) point.tag('user_directory', msg.user_directory);
if (msg?.user_id?.length > 0) point.tag('user_id', msg.user_id);
if (msg?.app_id?.length > 0) point.tag('app_id', msg.app_id);
if (msg?.app_name?.length > 0) point.tag('app_name', msg.app_name);
if (msg?.object_id?.length > 0) point.tag('object_id', msg.object_id);
}
// Add log event categories to tags if available
if (msg?.category?.length > 0) {
msg.category.forEach((category) => {
point.tag(category.name, category.value);
});
}
// Add custom tags from config file
const configTags = globals.config.get('Butler-SOS.logEvents.tags');
applyInfluxTags(point, configTags);
globals.logger.silly(`LOG EVENT V2: Influxdb datapoint: ${JSON.stringify(point, null, 2)}`);
// Write to InfluxDB with retry logic
await writeToInfluxWithRetry(
async () => {
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', {
flushInterval: 5000,
maxRetries: 0,
});
try {
await writeApi.writePoint(point);
await writeApi.close();
} catch (err) {
try {
await writeApi.close();
} catch (closeErr) {
// Ignore close errors
}
throw err;
}
},
`Log event for ${msg.host}`,
'v2',
msg.host
);
globals.logger.verbose('LOG EVENT V2: Sent log event data to InfluxDB');
}

View File

@@ -1,175 +1,204 @@
import { Point } from '@influxdata/influxdb-client';
import globals from '../../../globals.js';
import { logError } from '../../log-error.js';
import { isInfluxDbEnabled, writeToInfluxWithRetry } from '../shared/utils.js';
import { applyInfluxTags } from './utils.js';
/**
 * Store user event queue metrics to InfluxDB v2
 *
 * @description
 * Retrieves metrics from the user event queue manager and stores them in InfluxDB v2
 * for monitoring queue health, backpressure, dropped messages, and processing performance.
 * After a successful write, the metrics are cleared so that tracking starts fresh.
 *
 * Metrics include:
 * - Queue size and utilization
 * - Message counts (received, queued, processed, failed, dropped)
 * - Processing time statistics (average, p95, max)
 * - Rate limiting and backpressure status
 *
 * @returns {Promise<void>} Promise that resolves when data has been posted to InfluxDB
 */
export async function storeUserEventQueueMetricsV2() {
try {
// Check if queue metrics are enabled
if (
!globals.config.get(
'Butler-SOS.userEvents.udpServerConfig.queueMetrics.influxdb.enable'
)
) {
return;
}
// Get metrics from queue manager
const queueManager = globals.udpQueueManagerUserActivity;
if (!queueManager) {
globals.logger.warn('USER EVENT QUEUE METRICS V2: Queue manager not initialized');
return;
}
const metrics = await queueManager.getMetrics();
// Get configuration
const measurementName = globals.config.get(
'Butler-SOS.userEvents.udpServerConfig.queueMetrics.influxdb.measurementName'
);
const configTags = globals.config.get(
'Butler-SOS.userEvents.udpServerConfig.queueMetrics.influxdb.tags'
);
// Create write API with options
const writeOptions = {
flushInterval: 5000,
maxRetries: 2,
};
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', writeOptions);
if (!writeApi) {
globals.logger.warn('USER EVENT QUEUE METRICS V2: Influxdb write API object not found');
return;
}
const point = new Point(measurementName)
.tag('queue_type', 'user_events')
.tag('host', globals.hostInfo.hostname)
.intField('queue_size', metrics.queueSize)
.intField('queue_max_size', metrics.queueMaxSize)
.floatField('queue_utilization_pct', metrics.queueUtilizationPct)
.intField('queue_pending', metrics.queuePending)
.intField('messages_received', metrics.messagesReceived)
.intField('messages_queued', metrics.messagesQueued)
.intField('messages_processed', metrics.messagesProcessed)
.intField('messages_failed', metrics.messagesFailed)
.intField('messages_dropped_total', metrics.messagesDroppedTotal)
.intField('messages_dropped_rate_limit', metrics.messagesDroppedRateLimit)
.intField('messages_dropped_queue_full', metrics.messagesDroppedQueueFull)
.intField('messages_dropped_size', metrics.messagesDroppedSize)
.floatField('processing_time_avg_ms', metrics.processingTimeAvgMs)
.floatField('processing_time_p95_ms', metrics.processingTimeP95Ms)
.floatField('processing_time_max_ms', metrics.processingTimeMaxMs)
.intField('rate_limit_current', metrics.rateLimitCurrent)
.intField('backpressure_active', metrics.backpressureActive);
// Add static tags from config file
if (configTags && configTags.length > 0) {
for (const item of configTags) {
point.tag(item.name, item.value);
}
}
writeApi.writePoint(point);
await writeApi.close();
globals.logger.verbose('USER EVENT QUEUE METRICS V2: Sent queue metrics data to InfluxDB');
} catch (err) {
logError('USER EVENT QUEUE METRICS V2: Error saving data', err);
throw err;
// Check if queue metrics are enabled
if (!globals.config.get('Butler-SOS.userEvents.udpServerConfig.queueMetrics.influxdb.enable')) {
return;
}
// Only write to InfluxDB if enabled
if (!isInfluxDbEnabled()) {
return;
}
// Get metrics from queue manager
const queueManager = globals.udpQueueManagerUserActivity;
if (!queueManager) {
globals.logger.warn('USER EVENT QUEUE METRICS V2: Queue manager not initialized');
return;
}
const metrics = await queueManager.getMetrics();
// Get configuration
const measurementName = globals.config.get(
'Butler-SOS.userEvents.udpServerConfig.queueMetrics.influxdb.measurementName'
);
const configTags = globals.config.get(
'Butler-SOS.userEvents.udpServerConfig.queueMetrics.influxdb.tags'
);
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
const point = new Point(measurementName)
.tag('queue_type', 'user_events')
.tag('host', globals.hostInfo.hostname)
.intField('queue_size', metrics.queueSize)
.intField('queue_max_size', metrics.queueMaxSize)
.floatField('queue_utilization_pct', metrics.queueUtilizationPct)
.intField('queue_pending', metrics.queuePending)
.intField('messages_received', metrics.messagesReceived)
.intField('messages_queued', metrics.messagesQueued)
.intField('messages_processed', metrics.messagesProcessed)
.intField('messages_failed', metrics.messagesFailed)
.intField('messages_dropped_total', metrics.messagesDroppedTotal)
.intField('messages_dropped_rate_limit', metrics.messagesDroppedRateLimit)
.intField('messages_dropped_queue_full', metrics.messagesDroppedQueueFull)
.intField('messages_dropped_size', metrics.messagesDroppedSize)
.floatField('processing_time_avg_ms', metrics.processingTimeAvgMs)
.floatField('processing_time_p95_ms', metrics.processingTimeP95Ms)
.floatField('processing_time_max_ms', metrics.processingTimeMaxMs)
.intField('rate_limit_current', metrics.rateLimitCurrent)
.intField('backpressure_active', metrics.backpressureActive);
// Add static tags from config file
applyInfluxTags(point, configTags);
// Write to InfluxDB with retry logic
await writeToInfluxWithRetry(
async () => {
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', {
flushInterval: 5000,
maxRetries: 0,
});
try {
await writeApi.writePoint(point);
await writeApi.close();
} catch (err) {
try {
await writeApi.close();
} catch (closeErr) {
// Ignore close errors
}
throw err;
}
},
'User event queue metrics',
'v2',
'user-events-queue'
);
globals.logger.verbose('USER EVENT QUEUE METRICS V2: Sent queue metrics data to InfluxDB');
// Clear metrics after successful write
await queueManager.clearMetrics();
}
/**
 * Store log event queue metrics to InfluxDB v2
 *
 * @description
 * Retrieves metrics from the log event queue manager and stores them in InfluxDB v2
 * for monitoring queue health, backpressure, dropped messages, and processing performance.
 * After a successful write, the metrics are cleared so that tracking starts fresh.
 *
 * Metrics include:
 * - Queue size and utilization
 * - Message counts (received, queued, processed, failed, dropped)
 * - Processing time statistics (average, p95, max)
 * - Rate limiting and backpressure status
 *
 * @returns {Promise<void>} Promise that resolves when data has been posted to InfluxDB
 */
export async function storeLogEventQueueMetricsV2() {
try {
// Check if queue metrics are enabled
if (
!globals.config.get('Butler-SOS.logEvents.udpServerConfig.queueMetrics.influxdb.enable')
) {
return;
}
// Get metrics from queue manager
const queueManager = globals.udpQueueManagerLogEvents;
if (!queueManager) {
globals.logger.warn('LOG EVENT QUEUE METRICS V2: Queue manager not initialized');
return;
}
const metrics = await queueManager.getMetrics();
// Get configuration
const measurementName = globals.config.get(
'Butler-SOS.logEvents.udpServerConfig.queueMetrics.influxdb.measurementName'
);
const configTags = globals.config.get(
'Butler-SOS.logEvents.udpServerConfig.queueMetrics.influxdb.tags'
);
// Create write API with options
const writeOptions = {
flushInterval: 5000,
maxRetries: 2,
};
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', writeOptions);
if (!writeApi) {
globals.logger.warn('LOG EVENT QUEUE METRICS V2: Influxdb write API object not found');
return;
}
const point = new Point(measurementName)
.tag('queue_type', 'log_events')
.tag('host', globals.hostInfo.hostname)
.intField('queue_size', metrics.queueSize)
.intField('queue_max_size', metrics.queueMaxSize)
.floatField('queue_utilization_pct', metrics.queueUtilizationPct)
.intField('queue_pending', metrics.queuePending)
.intField('messages_received', metrics.messagesReceived)
.intField('messages_queued', metrics.messagesQueued)
.intField('messages_processed', metrics.messagesProcessed)
.intField('messages_failed', metrics.messagesFailed)
.intField('messages_dropped_total', metrics.messagesDroppedTotal)
.intField('messages_dropped_rate_limit', metrics.messagesDroppedRateLimit)
.intField('messages_dropped_queue_full', metrics.messagesDroppedQueueFull)
.intField('messages_dropped_size', metrics.messagesDroppedSize)
.floatField('processing_time_avg_ms', metrics.processingTimeAvgMs)
.floatField('processing_time_p95_ms', metrics.processingTimeP95Ms)
.floatField('processing_time_max_ms', metrics.processingTimeMaxMs)
.intField('rate_limit_current', metrics.rateLimitCurrent)
.intField('backpressure_active', metrics.backpressureActive);
// Add static tags from config file
if (configTags && configTags.length > 0) {
for (const item of configTags) {
point.tag(item.name, item.value);
}
}
writeApi.writePoint(point);
await writeApi.close();
globals.logger.verbose('LOG EVENT QUEUE METRICS V2: Sent queue metrics data to InfluxDB');
} catch (err) {
logError('LOG EVENT QUEUE METRICS V2: Error saving data', err);
throw err;
// Check if queue metrics are enabled
if (!globals.config.get('Butler-SOS.logEvents.udpServerConfig.queueMetrics.influxdb.enable')) {
return;
}
// Only write to InfluxDB if enabled
if (!isInfluxDbEnabled()) {
return;
}
// Get metrics from queue manager
const queueManager = globals.udpQueueManagerLogEvents;
if (!queueManager) {
globals.logger.warn('LOG EVENT QUEUE METRICS V2: Queue manager not initialized');
return;
}
const metrics = await queueManager.getMetrics();
// Get configuration
const measurementName = globals.config.get(
'Butler-SOS.logEvents.udpServerConfig.queueMetrics.influxdb.measurementName'
);
const configTags = globals.config.get(
'Butler-SOS.logEvents.udpServerConfig.queueMetrics.influxdb.tags'
);
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
const point = new Point(measurementName)
.tag('queue_type', 'log_events')
.tag('host', globals.hostInfo.hostname)
.intField('queue_size', metrics.queueSize)
.intField('queue_max_size', metrics.queueMaxSize)
.floatField('queue_utilization_pct', metrics.queueUtilizationPct)
.intField('queue_pending', metrics.queuePending)
.intField('messages_received', metrics.messagesReceived)
.intField('messages_queued', metrics.messagesQueued)
.intField('messages_processed', metrics.messagesProcessed)
.intField('messages_failed', metrics.messagesFailed)
.intField('messages_dropped_total', metrics.messagesDroppedTotal)
.intField('messages_dropped_rate_limit', metrics.messagesDroppedRateLimit)
.intField('messages_dropped_queue_full', metrics.messagesDroppedQueueFull)
.intField('messages_dropped_size', metrics.messagesDroppedSize)
.floatField('processing_time_avg_ms', metrics.processingTimeAvgMs)
.floatField('processing_time_p95_ms', metrics.processingTimeP95Ms)
.floatField('processing_time_max_ms', metrics.processingTimeMaxMs)
.intField('rate_limit_current', metrics.rateLimitCurrent)
.intField('backpressure_active', metrics.backpressureActive);
// Add static tags from config file
applyInfluxTags(point, configTags);
// Write to InfluxDB with retry logic
await writeToInfluxWithRetry(
async () => {
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', {
flushInterval: 5000,
maxRetries: 0,
});
try {
await writeApi.writePoint(point);
await writeApi.close();
} catch (err) {
try {
await writeApi.close();
} catch (closeErr) {
// Ignore close errors
}
throw err;
}
},
'Log event queue metrics',
'v2',
'log-events-queue'
);
globals.logger.verbose('LOG EVENT QUEUE METRICS V2: Sent queue metrics data to InfluxDB');
// Clear metrics after successful write
await queueManager.clearMetrics();
}

View File

@@ -1,47 +1,92 @@
import globals from '../../../globals.js';
import { isInfluxDbEnabled, writeToInfluxWithRetry } from '../shared/utils.js';
/**
 * Store proxy session data to InfluxDB v2
 *
 * @description
 * Stores user session data from Qlik Sense proxy to InfluxDB v2. The function writes
 * pre-formatted session data points that have already been converted to InfluxDB Point objects.
 *
 * The userSessions.datapointInfluxdb array typically contains three types of measurements:
 * - user_session_summary: Summary with session count and user list
 * - user_session_list: List of users (for compatibility)
 * - user_session_details: Individual session details for each active session
 *
 * @param {object} userSessions - User session data object
 * @param {string} userSessions.serverName - Name of the Qlik Sense server
 * @param {string} userSessions.host - Hostname of the Qlik Sense server
 * @param {string} userSessions.virtualProxy - Virtual proxy name
 * @param {number} userSessions.sessionCount - Total number of active sessions
 * @param {string} userSessions.uniqueUserList - Comma-separated list of unique users
 * @param {Array<Point>} userSessions.datapointInfluxdb - Array of InfluxDB Point objects to write.
 *   Each Point object in the array is already formatted and ready to write.
 * @returns {Promise<void>} Promise that resolves when data has been posted to InfluxDB
 */
export async function storeSessionsV2(userSessions) {
try {
// Find writeApi for the server specified by serverName
const writeApi = globals.influxWriteApi.find(
(element) => element.serverName === userSessions.serverName
);
globals.logger.debug(`PROXY SESSIONS V2: User sessions: ${JSON.stringify(userSessions)}`);
if (!writeApi) {
globals.logger.warn(
`PROXY SESSIONS V2: Influxdb write API object not found for host ${userSessions.host}`
);
return;
}
globals.logger.silly(
`PROXY SESSIONS V2: Influxdb datapoint for server "${userSessions.host}", virtual proxy "${userSessions.virtualProxy}": ${JSON.stringify(
userSessions.datapointInfluxdb,
null,
2
)}`
);
// Data points are already in InfluxDB v2 format (Point objects)
// Write array of measurements: user_session_summary, user_session_list, user_session_details
await writeApi.writeAPI.writePoints(userSessions.datapointInfluxdb);
globals.logger.verbose(
`PROXY SESSIONS V2: Sent user session data to InfluxDB for server "${userSessions.host}", virtual proxy "${userSessions.virtualProxy}"`
);
} catch (err) {
// Track error count
await globals.errorTracker.incrementError('INFLUXDB_V2_WRITE', userSessions.serverName);
globals.logger.error(
`PROXY SESSIONS V2: Error saving user session data: ${globals.getErrorMessage(err)}`
);
throw err;
// Only write to InfluxDB if enabled
if (!isInfluxDbEnabled()) {
return;
}
// Validate input - ensure datapointInfluxdb is an array
if (!Array.isArray(userSessions.datapointInfluxdb)) {
globals.logger.warn(
`PROXY SESSIONS V2: Invalid data format for host ${userSessions.host} - datapointInfluxdb must be an array`
);
return;
}
// Find writeApi for the server specified by serverName
const writeApi = globals.influxWriteApi.find(
(element) => element.serverName === userSessions.serverName
);
if (!writeApi) {
globals.logger.warn(
`PROXY SESSIONS V2: Influxdb write API object not found for host ${userSessions.host}`
);
return;
}
globals.logger.silly(
`PROXY SESSIONS V2: Influxdb datapoint for server "${userSessions.host}", virtual proxy "${userSessions.virtualProxy}": ${JSON.stringify(
userSessions.datapointInfluxdb,
null,
2
)}`
);
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
// Write array of measurements using retry logic
await writeToInfluxWithRetry(
async () => {
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', {
flushInterval: 5000,
maxRetries: 0,
});
try {
await writeApi.writePoints(userSessions.datapointInfluxdb);
await writeApi.close();
} catch (err) {
try {
await writeApi.close();
} catch (closeErr) {
// Ignore close errors
}
throw err;
}
},
`Proxy sessions for ${userSessions.host}/${userSessions.virtualProxy}`,
'v2',
userSessions.serverName
);
globals.logger.verbose(
`PROXY SESSIONS V2: Sent user session data to InfluxDB for server "${userSessions.host}", virtual proxy "${userSessions.virtualProxy}"`
);
}

View File

@@ -1,80 +1,107 @@
import { Point } from '@influxdata/influxdb-client';
import globals from '../../../globals.js';
import { isInfluxDbEnabled, writeToInfluxWithRetry } from '../shared/utils.js';
import { applyInfluxTags } from './utils.js';
/**
 * Store user event to InfluxDB v2
 *
 * @description
 * Stores user interaction events from Qlik Sense to InfluxDB v2 for tracking user activity,
 * including app interactions, user agent information, and custom tags.
 *
 * @param {object} msg - User event message containing event details
 * @param {string} msg.host - Hostname of the Qlik Sense server
 * @param {string} msg.command - Event action/command (e.g., OpenApp, CreateApp, etc.)
 * @param {string} msg.user_directory - User directory
 * @param {string} msg.user_id - User ID
 * @param {string} msg.origin - Origin of the event (e.g., Qlik Sense, QlikView, etc.)
 * @param {string} [msg.appId] - Application ID (if applicable)
 * @param {string} [msg.appName] - Application name (if applicable)
 * @param {object} [msg.ua] - User agent information object
 * @param {object} [msg.ua.browser] - Browser information
 * @param {string} [msg.ua.browser.name] - Browser name
 * @param {string} [msg.ua.browser.major] - Browser major version
 * @param {object} [msg.ua.os] - Operating system information
 * @param {string} [msg.ua.os.name] - OS name
 * @param {string} [msg.ua.os.version] - OS version
 * @returns {Promise<void>} Promise that resolves when data has been posted to InfluxDB
 */
export async function storeUserEventV2(msg) {
try {
globals.logger.debug(`USER EVENT V2: ${JSON.stringify(msg)}`);
globals.logger.debug(`USER EVENT V2: ${JSON.stringify(msg)}`);
// Create write API with options
const writeOptions = {
flushInterval: 5000,
maxRetries: 2,
};
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', writeOptions);
if (!writeApi) {
globals.logger.warn('USER EVENT V2: Influxdb write API object not found');
return;
}
// Create point using v2 Point class
const point = new Point('user_events')
.tag('host', msg.host)
.tag('event_action', msg.command)
.tag('userFull', `${msg.user_directory}\\${msg.user_id}`)
.tag('userDirectory', msg.user_directory)
.tag('userId', msg.user_id)
.tag('origin', msg.origin)
.stringField('userFull', `${msg.user_directory}\\${msg.user_id}`)
.stringField('userId', msg.user_id);
// Add app id and name to tags if available
if (msg?.appId) point.tag('appId', msg.appId);
if (msg?.appName) point.tag('appName', msg.appName);
// Add user agent info to tags if available
if (msg?.ua?.browser?.name) point.tag('uaBrowserName', msg?.ua?.browser?.name);
if (msg?.ua?.browser?.major) point.tag('uaBrowserMajorVersion', msg?.ua?.browser?.major);
if (msg?.ua?.os?.name) point.tag('uaOsName', msg?.ua?.os?.name);
if (msg?.ua?.os?.version) point.tag('uaOsVersion', msg?.ua?.os?.version);
// Add custom tags from config file to payload
if (
globals.config.has('Butler-SOS.userEvents.tags') &&
globals.config.get('Butler-SOS.userEvents.tags') !== null &&
globals.config.get('Butler-SOS.userEvents.tags').length > 0
) {
const configTags = globals.config.get('Butler-SOS.userEvents.tags');
for (const item of configTags) {
point.tag(item.name, item.value);
}
}
// Add app id and name to fields if available
if (msg?.appId) point.stringField('appId', msg.appId);
if (msg?.appName) point.stringField('appName', msg.appName);
globals.logger.silly(
`USER EVENT V2: Influxdb datapoint: ${JSON.stringify(point, null, 2)}`
);
await writeApi.writePoint(point);
globals.logger.verbose('USER EVENT V2: Sent user event data to InfluxDB');
} catch (err) {
globals.logger.error(
`USER EVENT V2: Error saving user event: ${globals.getErrorMessage(err)}`
);
throw err;
// Only write to InfluxDB if enabled
if (!isInfluxDbEnabled()) {
return;
}
// Validate required fields
if (!msg.host || !msg.command || !msg.user_directory || !msg.user_id || !msg.origin) {
globals.logger.warn(
`USER EVENT V2: Missing required fields in user event message: ${JSON.stringify(msg)}`
);
return;
}
const org = globals.config.get('Butler-SOS.influxdbConfig.v2Config.org');
const bucketName = globals.config.get('Butler-SOS.influxdbConfig.v2Config.bucket');
// Create point using v2 Point class
const point = new Point('user_events')
.tag('host', msg.host)
.tag('event_action', msg.command)
.tag('userFull', `${msg.user_directory}\\${msg.user_id}`)
.tag('userDirectory', msg.user_directory)
.tag('userId', msg.user_id)
.tag('origin', msg.origin)
.stringField('userFull', `${msg.user_directory}\\${msg.user_id}`)
.stringField('userId', msg.user_id);
// Add app id and name to tags and fields if available
if (msg?.appId) {
point.tag('appId', msg.appId);
point.stringField('appId_field', msg.appId);
}
if (msg?.appName) {
point.tag('appName', msg.appName);
point.stringField('appName_field', msg.appName);
}
// Add user agent info to tags if available
if (msg?.ua?.browser?.name) point.tag('uaBrowserName', msg?.ua?.browser?.name);
if (msg?.ua?.browser?.major) point.tag('uaBrowserMajorVersion', msg?.ua?.browser?.major);
if (msg?.ua?.os?.name) point.tag('uaOsName', msg?.ua?.os?.name);
if (msg?.ua?.os?.version) point.tag('uaOsVersion', msg?.ua?.os?.version);
// Add custom tags from config file
const configTags = globals.config.get('Butler-SOS.userEvents.tags');
applyInfluxTags(point, configTags);
globals.logger.silly(`USER EVENT V2: Influxdb datapoint: ${JSON.stringify(point, null, 2)}`);
// Write to InfluxDB with retry logic
await writeToInfluxWithRetry(
async () => {
const writeApi = globals.influx.getWriteApi(org, bucketName, 'ns', {
flushInterval: 5000,
maxRetries: 0,
});
try {
await writeApi.writePoint(point);
await writeApi.close();
} catch (err) {
try {
await writeApi.close();
} catch (closeErr) {
// Ignore close errors
}
throw err;
}
},
`User event for ${msg.host}`,
'v2',
msg.host
);
globals.logger.verbose('USER EVENT V2: Sent user event data to InfluxDB');
}

View File

@@ -0,0 +1,22 @@
import { Point } from '@influxdata/influxdb-client';
/**
 * Applies a set of config-defined tags to an InfluxDB Point object.
 *
 * Entries without a name, or whose value is null/undefined, are silently
 * skipped; all other values are coerced to strings before tagging.
 *
 * @param {Point} point - The InfluxDB Point object to tag
 * @param {Array<{name: string, value: string}>} tags - Array of tag objects from config
 * @returns {Point} The same Point object, enabling call chaining
 */
export function applyInfluxTags(point, tags) {
    // Nothing to apply unless we were handed a non-empty array
    if (!Array.isArray(tags) || tags.length === 0) {
        return point;
    }

    tags.forEach((tag) => {
        // Require a truthy name and a non-nullish value (== null matches both null and undefined)
        if (tag.name && tag.value != null) {
            point.tag(tag.name, String(tag.value));
        }
    });

    return point;
}

File diff suppressed because it is too large Load Diff

View File

@@ -44,7 +44,7 @@ jest.unstable_mockModule('uuid', () => ({
}));
// Mock posting modules
jest.unstable_mockModule('../../../post-to-influxdb.js', () => ({
jest.unstable_mockModule('../../../influxdb/index.js', () => ({
postUserEventToInfluxdb: jest.fn(),
storeEventCountInfluxDB: jest.fn(),
storeRejectedEventCountInfluxDB: jest.fn(),
@@ -61,7 +61,7 @@ jest.unstable_mockModule('../../../post-to-mqtt.js', () => ({
// Import modules after mocking
const { validate } = await import('uuid');
const { UAParser } = await import('ua-parser-js');
const { postUserEventToInfluxdb } = await import('../../../post-to-influxdb.js');
const { postUserEventToInfluxdb } = await import('../../../influxdb/index.js');
const { postUserEventToNewRelic } = await import('../../../post-to-new-relic.js');
const { postUserEventToMQTT } = await import('../../../post-to-mqtt.js');
const { default: globals } = await import('../../../../globals.js');