Mirror of https://github.com/ptarmiganlabs/butler-sos.git
Move to ./src subdir. Lots of code cleanup.
butler-sos.js (deleted, 543 lines)
@@ -1,543 +0,0 @@
// Add dependencies
var request = require("request");
var restify = require('restify');

// Load code from sub modules
var globals = require("./globals");

// Load certificates to use when connecting to healthcheck API
var fs = require("fs"),
    path = require("path"),
    certFile = path.resolve(__dirname, globals.config.get("Butler-SOS.cert.clientCert")),
    keyFile = path.resolve(__dirname, globals.config.get("Butler-SOS.cert.clientCertKey")),
    caFile = path.resolve(__dirname, globals.config.get("Butler-SOS.cert.clientCertCA"));


// ---------------------------------------------------
// Create restServer object
var restServer = restify.createServer({
    name: 'Docker healthcheck for Butler-SOS',
    version: globals.appVersion
});

// Enable parsing of http parameters
restServer.use(restify.plugins.queryParser());

// Set up endpoint for REST server
restServer.get({
    path: '/',
    flags: 'i'
}, (req, res, next) => {
    globals.logger.verbose(`Docker healthcheck API endpoint called.`);

    res.send(0);
    next();
});


// Set specific log level (if/when needed to override the config file setting)
// Possible values are { error: 0, warn: 1, info: 2, verbose: 3, debug: 4, silly: 5 }
// Default is to use log level defined in config file
globals.logger.info("--------------------------------------");
globals.logger.info("Starting Butler SOS");
globals.logger.info(`Log level is: ${globals.getLoggingLevel()}`);
globals.logger.info(`App version is: ${globals.appVersion}`);
globals.logger.info("--------------------------------------");

// Log info about what Qlik Sense certificates are being used
globals.logger.debug(`Client cert: ${certFile}`);
globals.logger.debug(`Client cert key: ${keyFile}`);
globals.logger.debug(`CA cert: ${caFile}`);

// ---------------------------------------------------
// Start Docker healthcheck REST server on port 12398
restServer.listen(12398, function () {
    globals.logger.info('Docker healthcheck server now listening');
});


function postToInfluxdb(host, body, influxTags) {
    // Calculate server uptime

    var dateTime = Date.now();
    var timestamp = Math.floor(dateTime);

    var str = body.started;
    var year = str.substring(0, 4);
    var month = str.substring(4, 6);
    var day = str.substring(6, 8);
    var hour = str.substring(9, 11);
    var minute = str.substring(11, 13);
    var second = str.substring(13, 15);
    var dateTimeStarted = new Date(year, month - 1, day, hour, minute, second);
    var timestampStarted = Math.floor(dateTimeStarted);

    var diff = timestamp - timestampStarted;

    // Create a new JavaScript Date object based on the timestamp
    // multiplied by 1000 so that the argument is in milliseconds, not seconds.
    var date = new Date(diff);

    var days = Math.trunc(diff / (1000 * 60 * 60 * 24));

    // Hours part from the timestamp
    var hours = date.getHours();

    // Minutes part from the timestamp
    var minutes = "0" + date.getMinutes();

    // Seconds part from the timestamp
    var seconds = "0" + date.getSeconds();

    // Will display time in 10:30:23 format
    var formattedTime =
        days +
        " days, " +
        hours +
        "h " +
        minutes.substr(-2) +
        "m " +
        seconds.substr(-2) +
        "s";

    // Build tags structure that will be passed to InfluxDB
    globals.logger.debug(`Tags sent to InfluxDB: ${JSON.stringify(influxTags)}`);

    // Write the whole reading to Influxdb
    globals.influx
        .writePoints([{
            measurement: "sense_server",
            tags: influxTags,
            fields: {
                version: body.version,
                started: body.started,
                uptime: formattedTime
            }
        },
        {
            measurement: "mem",
            tags: influxTags,
            fields: {
                comitted: body.mem.comitted,
                allocated: body.mem.allocated,
                free: body.mem.free
            }
        },
        {
            measurement: "apps",
            tags: influxTags,
            fields: {
                active_docs_count: body.apps.active_docs.length,
                loaded_docs_count: body.apps.loaded_docs.length,
                in_memory_docs_count: body.apps.in_memory_docs.length,
                active_docs: (globals.config.get("Butler-SOS.influxdbConfig.includeFields.activeDocs") ? body.apps.active_docs : ''),
                loaded_docs: (globals.config.get("Butler-SOS.influxdbConfig.includeFields.loadedDocs") ? body.apps.loaded_docs : ''),
                in_memory_docs: (globals.config.get("Butler-SOS.influxdbConfig.includeFields.inMemoryDocs") ? body.apps.in_memory_docs : ''),
                calls: body.apps.calls,
                selections: body.apps.selections
            }
        },
        {
            measurement: "cpu",
            tags: influxTags,
            fields: {
                total: body.cpu.total
            }
        },
        {
            measurement: "session",
            tags: influxTags,
            fields: {
                active: body.session.active,
                total: body.session.total
            }
        },
        {
            measurement: "users",
            tags: influxTags,
            fields: {
                active: body.users.active,
                total: body.users.total
            }
        },
        {
            measurement: "cache",
            tags: influxTags,
            fields: {
                hits: body.cache.hits,
                lookups: body.cache.lookups,
                added: body.cache.added,
                replaced: body.cache.replaced,
                bytes_added: body.cache.bytes_added
            }
        },
        {
            measurement: "saturated",
            tags: influxTags,
            fields: {
                saturated: body.saturated
            }
        }
        ])
        .then(() => {
            globals.logger.verbose(`Sent health data to Influxdb for server ${influxTags.server_name}`);
        })

        .catch(err => {
            console.error(`Error saving health data to InfluxDB! ${err.stack}`);
        });
}

function postLogDbToMQTT(
    process_host,
    process_name,
    entry_level,
    message,
    timestamp
) {
    // Get base MQTT topic
    var baseTopic = globals.config.get("Butler-SOS.mqttConfig.baseTopic");

    // Send to MQTT
    globals.mqttClient.publish(
        baseTopic + process_host + "/" + process_name + "/" + entry_level,
        message
    );
}

function postHealthToMQTT(host, serverName, body) {
    // Get base MQTT topic
    var baseTopic = globals.config.get("Butler-SOS.mqttConfig.baseTopic");

    // Send to MQTT
    globals.mqttClient.publish(baseTopic + serverName + "/version", body.version);
    globals.mqttClient.publish(baseTopic + serverName + "/started", body.started);
    globals.mqttClient.publish(
        baseTopic + serverName + "/mem/comitted",
        body.mem.committed.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/mem/allocated",
        body.mem.allocated.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/mem/free",
        body.mem.free.toString()
    );

    globals.mqttClient.publish(
        baseTopic + serverName + "/cpu/total",
        body.cpu.total.toString()
    );

    globals.mqttClient.publish(
        baseTopic + serverName + "/session/active",
        body.session.active.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/session/total",
        body.session.total.toString()
    );

    globals.mqttClient.publish(
        baseTopic + serverName + "/apps/active_docs",
        body.apps.active_docs.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/apps/loaded_docs",
        body.apps.loaded_docs.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/apps/in_memory_docs",
        body.apps.in_memory_docs.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/apps/calls",
        body.apps.calls.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/apps/selections",
        body.apps.selections.toString()
    );

    globals.mqttClient.publish(
        baseTopic + serverName + "/users/active",
        body.users.active.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/users/total",
        body.users.total.toString()
    );

    globals.mqttClient.publish(
        baseTopic + serverName + "/cache/hits",
        body.cache.hits.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/cache/lookups",
        body.cache.lookups.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/cache/added",
        body.cache.added.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/cache/replaced",
        body.cache.replaced.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/cache/bytes_added",
        body.cache.bytes_added.toString()
    );
    if (body.cache.lookups > 0) {
        globals.mqttClient.publish(
            baseTopic + serverName + "/cache/hit_ratio",
            Math.floor(body.cache.hits / body.cache.lookups * 100).toString()
        );
    }

    globals.mqttClient.publish(
        baseTopic + serverName + "/saturated",
        body.saturated.toString()
    );
}

function getStatsFromSense(host, influxTags) {
    globals.logger.debug(
        "URL=" + "https://" + host + "/engine/healthcheck/"
    );

    request({
        followRedirect: true,
        url: "https://" + host + "/engine/healthcheck/",
        method: 'GET',
        headers: {
            "Cache-Control": "no-cache",
            "Content-Type": "application/json"
        },
        json: true,
        cert: fs.readFileSync(certFile),
        key: fs.readFileSync(keyFile),
        ca: fs.readFileSync(caFile),
        rejectUnauthorized: false,
        requestCert: true,
        agent: false
    },
    function (error, response, body) {
        // Check for error
        if (error) {
            globals.logger.error(`Error when calling health check API: ${error}`);
            globals.logger.error(`Response: ${response}`);
            globals.logger.error(`Body: ${body}`);
            return;
        }

        if (!error && response.statusCode === 200) {
            globals.logger.verbose("Received ok response from " + influxTags.host);
            globals.logger.debug(JSON.stringify(body));

            // Post to MQTT (if enabled)
            if (globals.config.get("Butler-SOS.mqttConfig.enableMQTT")) {
                globals.logger.debug("Calling MQTT posting method");
                postHealthToMQTT(host, influxTags.host, body);
            }

            // Post to Influxdb (if enabled)
            if (globals.config.get("Butler-SOS.influxdbConfig.enableInfluxdb")) {
                globals.logger.debug("Calling Influxdb posting method");
                postToInfluxdb(host, body, influxTags);
            }
        }
    }
    );
}

if (globals.config.get("Butler-SOS.logdb.enableLogDb") == true) {

    // Get query period from config file. If not specified there, use default value.
    var queryPeriod = '5 minutes';
    if (globals.config.has("Butler-SOS.logdb.queryPeriod")) {
        queryPeriod = globals.config.get("Butler-SOS.logdb.queryPeriod");
    }

    // Configure timer for getting log data from Postgres
    setInterval(function () {
        globals.logger.verbose("Event started: Query log db");


        // Create list of logging levels to include in query
        extractErrors: true
        extractWarnings: true
        extractInfo: false

        let arrayincludeLogLevels = [];
        if (globals.config.get("Butler-SOS.logdb.extractErrors")) {
            arrayincludeLogLevels.push("'ERROR'");
        }
        if (globals.config.get("Butler-SOS.logdb.extractWarnings")) {
            arrayincludeLogLevels.push("'WARN'");
        }
        if (globals.config.get("Butler-SOS.logdb.extractInfo")) {
            arrayincludeLogLevels.push("'INFO'");
        }
        const includeLogLevels = arrayincludeLogLevels.join();

        // checkout a Postgres client from connection pool
        globals.pgPool.connect()
            .then(pgClient => {
                return pgClient
                    .query(
                        `select
                            id,
                            entry_timestamp as timestamp,
                            entry_level,
                            process_host,
                            process_name,
                            payload
                        from public.log_entries
                        where
                            entry_level in (${includeLogLevels}) and
                            (entry_timestamp > now() - INTERVAL '${queryPeriod}' )
                        order by
                            entry_timestamp desc
                        `
                    )
                    .then(res => {
                        pgClient.release();
                        globals.logger.debug('Log db query got a response.');

                        var rows = res.rows;
                        rows.forEach(function (row) {
                            globals.logger.silly(`Log db row: ${JSON.stringify(row)}`);

                            // Post to Influxdb (if enabled)
                            if (globals.config.get("Butler-SOS.influxdbConfig.enableInfluxdb")) {
                                globals.logger.silly("Posting log db data to Influxdb...");

                                // Make sure that the payload message exists - storing it to Influx would otherwise throw an error
                                if (!row.payload.hasOwnProperty('Message')) {
                                    row.payload.Message = '';
                                }

                                // Get all tags for the current server.
                                // Some special logic is needed to match the host value returned from Postgres with the logDbHost property from
                                // the YAML config file.
                                // Once we have that match we can add all the tags for that server.
                                serverItem = globals.serverList.find(item => {
                                    globals.logger.silly(`Matching logdb host "${row.process_host}" against config file logDbHost "${item.logDbHost}"`);
                                    return item.logDbHost == row.process_host;
                                });

                                // If data row returned from log db is about a server which is not defined in the YAML config file, we need to be careful:
                                // Simple solution: only store data into Influxdb for servers defined in YAML config file.

                                if (serverItem == undefined) {
                                    group = '<no group>';
                                    srvName = '<no server>';
                                    srvDesc = '<no description>';
                                } else {
                                    group = serverItem.serverTags.serverGroup;
                                    srvName = serverItem.serverName;
                                    srvDesc = serverItem.serverDescription;
                                };

                                let tagsForDbEntry = {
                                    host: row.process_host,
                                    server_name: srvName,
                                    server_description: srvDesc,
                                    source_process: row.process_name,
                                    log_level: row.entry_level
                                };

                                // Add all tags defined for this server in the config file
                                if (serverItem.hasOwnProperty('serverTags')) {
                                    // Loop over all tags defined for the current server, adding them to the data structure that will later be passed to Influxdb
                                    Object.entries(serverItem.serverTags).forEach(entry => {
                                        tagsForDbEntry = Object.assign(tagsForDbEntry, {
                                            [entry[0]]: entry[1]
                                        });
                                    })

                                    globals.logger.debug(`Tags passed to Influxdb as part of logdb record: ${JSON.stringify(tagsForDbEntry)}`);
                                }

                                // Write the whole reading to Influxdb
                                globals.influx
                                    .writePoints([{
                                        measurement: "log_event",
                                        tags: tagsForDbEntry,
                                        fields: {
                                            message: row.payload.Message
                                        },
                                        timestamp: row.timestamp
                                    }])
                                    .then(err => {
                                        globals.logger.silly('Sent log db event to Influxdb');
                                    })
                                    .catch(err => {
                                        console.error(`Error saving log event to InfluxDB! ${err.stack}`);
                                        console.error(` Full error: ${JSON.stringify(err)}`);
                                    })
                            }

                            // Post to MQTT (if enabled)
                            if (globals.config.get("Butler-SOS.mqttConfig.enableMQTT")) {
                                globals.logger.silly("Posting log db data to MQTT...");
                                postLogDbToMQTT(
                                    row.process_host,
                                    row.process_name,
                                    row.entry_level,
                                    row.payload.Message,
                                    row.timestamp
                                );
                            }
                        });
                    })
                    .then(res => {
                        globals.logger.verbose("Sent log event to Influxdb");
                    })
                    .catch(err => {
                        globals.logger.error(`Log db query error: ${err.stack}`);
                        // pgClient.release();
                    });
            })
            .catch(err => {
                globals.logger.error(`ERROR: Could not connect to Postgres log db: ${err.stack}`);
            });
    }, globals.config.get("Butler-SOS.logdb.pollingInterval"));
}


// Configure timer for getting healthcheck data
setInterval(function () {
    globals.logger.verbose("Event started: Statistics collection");

    globals.serverList.forEach(function (server) {
        globals.logger.verbose(`Getting stats for server: ${server.serverName}`);

        globals.logger.debug(JSON.stringify(server));

        var tags = {
            host: server.host,
            server_name: server.serverName,
            server_description: server.serverDescription
        };
        // Check if there are any extra tags for this server that should be sent to InfluxDB
        if (server.hasOwnProperty('serverTags')) {

            // Loop over all tags defined for the current server, adding them to the data structure that will later be passed to Influxdb
            Object.entries(server.serverTags).forEach(entry => {
                globals.logger.debug(`Found server tag: ${JSON.stringify(entry)}`);

                tags = Object.assign(tags, {
                    [entry[0]]: entry[1]
                });
            })

            globals.logger.debug(`All tags: ${JSON.stringify(tags)}`);
        }
        globals.logger.debug(`Complete list of tags for server ${server.serverName}: ${JSON.stringify(tags)}`);

        getStatsFromSense(server.host, tags);
    });
}, globals.config.get("Butler-SOS.serversToMonitor.pollingInterval"));
package-lock.json (generated, 1816 lines; diff suppressed because it is too large)
package.json (deleted, 40 lines)
@@ -1,40 +0,0 @@
{
    "name": "butler-sos",
    "version": "4.0.0",
    "description": "Butler SenseOps Stats (\"Butler SOS\") is a Node.js service publishing operational Qlik Sense metrics to MQTT and Influxdb.",
    "main": "butler-sos.js",
    "scripts": {
        "test": "node_modules/jshint/bin/jshint butler-sos.js"
    },
    "repository": {
        "type": "git",
        "url": "git+https://github.com/ptarmiganlabs/butler-sos.git"
    },
    "keywords": [
        "butler",
        "butler-sos",
        "senseops",
        "devops",
        "influxdb",
        "qliksense"
    ],
    "author": "Göran Sander",
    "license": "MIT",
    "bugs": {
        "url": "https://github.com/ptarmiganlabs/butler-sos/issues"
    },
    "homepage": "https://github.com/ptarmiganlabs/butler-sos#readme",
    "dependencies": {
        "config": "^3.0.1",
        "influx": "^5.0.7",
        "js-yaml": "^3.13.0",
        "jshint": "^2.10.2",
        "mqtt": "^2.18.8",
        "pg": "^7.8.0",
        "request": "^2.88.0",
        "restify": "^7.6.0",
        "winston": "^3.2.0",
        "winston-daily-rotate-file": "^3.8.0"
    },
    "devDependencies": {}
}
src/butler-sos.js (new file, 69 lines)
@@ -0,0 +1,69 @@
// Add dependencies
var restify = require('restify');

// Load code from sub modules
const globals = require("./globals");
const mainMetrics = require("./lib/mainmetrics");

// Load certificates to use when connecting to healthcheck API
// var fs = require("fs"):
var path = require("path"),
    certFile = path.resolve(__dirname, globals.config.get("Butler-SOS.cert.clientCert")),
    keyFile = path.resolve(__dirname, globals.config.get("Butler-SOS.cert.clientCertKey")),
    caFile = path.resolve(__dirname, globals.config.get("Butler-SOS.cert.clientCertCA"));


// ---------------------------------------------------
// Create restServer object
var restServer = restify.createServer({
    name: 'Docker healthcheck for Butler-SOS',
    version: globals.appVersion
});

// Enable parsing of http parameters
restServer.use(restify.plugins.queryParser());

// Set up endpoint for REST server
restServer.get({
    path: '/',
    flags: 'i'
}, (req, res, next) => {
    globals.logger.verbose(`Docker healthcheck API endpoint called.`);

    res.send(0);
    next();
});


// Set specific log level (if/when needed to override the config file setting)
// Possible values are { error: 0, warn: 1, info: 2, verbose: 3, debug: 4, silly: 5 }
// Default is to use log level defined in config file
globals.logger.info("--------------------------------------");
globals.logger.info("Starting Butler SOS");
globals.logger.info(`Log level is: ${globals.getLoggingLevel()}`);
globals.logger.info(`App version is: ${globals.appVersion}`);
globals.logger.info("--------------------------------------");

// Log info about what Qlik Sense certificates are being used
globals.logger.debug(`Client cert: ${certFile}`);
globals.logger.debug(`Client cert key: ${keyFile}`);
globals.logger.debug(`CA cert: ${caFile}`);

// ---------------------------------------------------
// Start Docker healthcheck REST server on port 12398
restServer.listen(12398, function () {
    globals.logger.info('Docker healthcheck server now listening');
});

// // Set up extraction of data from log db
// if (globals.config.get("Butler-SOS.logdb.enableLogDb") == true) {
//     setupLogDbTimer();
// }

// // Set up extraction of sessions data
// if (globals.config.get("Butler-SOS.userSessions.enableSessionExtract") == true) {
//     setupSessionsTimer();
// }

// Set up extraction on main metrics data (i.e. the Sense healthcheck API)
mainMetrics.setupMainMetricsTimer();
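The log db and sessions timers are committed but still commented out above; once src/lib/logdb.js and src/lib/sessions.js are finished they would presumably be wired up the same way as the main metrics timer. A minimal sketch of that wiring, based only on the require paths and export names visible in the diffs below (not something this commit actually does):

// Hypothetical wiring, not part of this commit
const logDb = require("./lib/logdb");        // exports setupLogDbTimer
const sessions = require("./lib/sessions");  // exports setupSessionsTimer

if (globals.config.get("Butler-SOS.logdb.enableLogDb") === true) {
    logDb.setupLogDbTimer();
}

if (globals.config.get("Butler-SOS.userSessions.enableSessionExtract") === true) {
    sessions.setupSessionsTimer();
}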
src/config/production.yaml (new file, 123 lines)
@@ -0,0 +1,123 @@
Butler-SOS:
  # Logging configuration
  logLevel: debug       # Log level. Possible log levels are silly, debug, verbose, info, warn, error
  fileLogging: true     # true/false to enable/disable logging to disk file
  logDirectory: logs    # Subdirectory where log files are stored

  # Qlik Sense logging db config parameters
  logdb:
    enableLogDb: true
    influxDbRetentionPolicy: DEFAULT
    pollingInterval: 15000    # How often (milliseconds) should Postgres log db be queried for warnings and errors?
    queryPeriod: 3 minutes    # How far back should Butler SOS query for log entries? Default is 5 min
    host: pro2-win1.ptarmiganlabs.net
    port: 4432
    qlogsReaderUser: qlogs_reader
    qlogsReaderPwd: aTEM7TcwagA
    extractErrors: true       # Should error level entries be extracted from log db into Influxdb?
    extractWarnings: true     # Should warn level entries be extracted from log db into Influxdb?
    extractInfo: true         # Should info level entries be extracted from log db into Influxdb?

  # Certificates to use when querying Sense for healthcheck data. Get these from the Certificate Export in QMC.
  cert:
    # clientCert: /nodeapp/config/certificate/client.pem
    # clientCertKey: /nodeapp/config/certificate/client_key.pem
    # clientCertCA: /nodeapp/config/certificate/root.pem
    clientCert: /Users/goran/code/secret/pro2-win1/pro2-win1/client.pem
    clientCertKey: /Users/goran/code/secret/pro2-win1/pro2-win1/client_key.pem
    clientCertCA: /Users/goran/code/secret/pro2-win1/pro2-win1/root.pem

  # MQTT config parameters
  mqttConfig:
    enableMQTT: true
    brokerHost: 192.168.100.20
    brokerPort: 1883
    baseTopic: butler-sos/    # Topic should end with /

  # Influx db config parameters
  influxdbConfig:
    enableInfluxdb: true
    hostIP: 192.168.1.51
    dbName: SenseOps
    # Control whether certain fields are stored in InfluxDB or not
    # Use with caution! Enabling activeDocs, loadedDocs or inMemoryDocs may result in lots of data sent to InfluxDB.
    includeFields:
      activeDocs: true      # Should data on what docs are active be stored in Influxdb?
      loadedDocs: true      # Should data on what docs are loaded be stored in Influxdb?
      inMemoryDocs: true    # Should data on what docs are in memory be stored in Influxdb?

  # Sessions per virtual proxy
  userSessions:
    enableSessionExtract: true    # Query unique user IDs of what users have sessions open (true/false)?
    pollingInterval: 10000        # How often (milliseconds) should session data be polled?
    influxDbRetentionPolicy: 7days
    servers:                      # What hosts, including virtual proxies, should we get sessions for?
      - host: pro2-win1.ptarmiganlabs.net:4243
        virtualProxy:             # Default virtual proxy
      - host: pro2-win1.ptarmiganlabs.net:4243
        virtualProxy: /hdr

  serversToMonitor:
    pollingInterval: 15000            # How often (milliseconds) should the healthcheck API be polled?
    influxDbRetentionPolicy: 14days

    # List of extra tags for each server. Useful for creating more advanced Grafana dashboards.
    # Each server below MUST include these tags in its serverTags property.
    # The tags below are just examples - define your own as needed
    serverTagsDefinition:
      - server_group
      - serverLocation
      - server-type
      - serverBrand

    # Sense Servers that should be queried for healthcheck data
    servers:
      - host: pro2-win1.ptarmiganlabs.net:4747
        serverName: sense1
        serverDescription: Central
        logDbHost: pro2-win1
        serverTags:
          server_group: CENTRAL
          serverLocation: Europe
          server-type: virtual
          serverBrand: HP
      # - host: pro1-win2.ptarmiganlabs.net:4747
      #   serverName: sense2
      #   serverDescription: Dev server 1
      #   logDbHost: win2
      #   serverTags:
      #     server_group: DEV
      #     serverLocation: Europe
      #     server-type: virtual
      #     serverBrand: HP
      # - host: pro1-win3.ptarmiganlabs.net:4747
      #   serverName: sense3
      #   serverDescription: Dev server 2
      #   logDbHost: win3
      #   serverTags:
      #     server_group: DEV
      #     serverLocation: US
      #     server-type: physical
      #     serverBrand: Dell
      # - host: pro1-win4.ptarmiganlabs.net:4747
      #   serverName: sense4
      #   serverDescription: QA server 1
      #   logDbHost: win4
      #   serverTags:
      #     server_group: QA
      #     serverLocation: Sweden
      #     server-type: physical
      #     serverBrand: HP

      # - host: sense1.int.ptarmiganlabs.net:4747
      #   serverName: sense2
      #   serverDescription: Central2
      #   logDbHost: sense2
      #   influxTags:
      #     serverGroup: DEV2
      # - host: sensedev.int.ptarmiganlabs.net:4747
      #   serverName: sensedev
      #   serverDescription: Dev node
      #   logDbHost: sensedev
      #   influxTags:
      #     serverGroup: DEV
@@ -45,7 +45,15 @@ Butler-SOS:
      activeDocs: false     # Should data on what docs are active be stored in Influxdb (true/false)?
      loadedDocs: false     # Should data on what docs are loaded be stored in Influxdb (true/false)?
      inMemoryDocs: false   # Should data on what docs are in memory be stored in Influxdb (true/false)?
      userIds: true         # Extract and store in Influxdb unique user IDs of what users have sessions open (true/false)?

  # Sessions per virtual proxy
  userSessions:
    enableSessionExtract: true    # Query unique user IDs of what users have sessions open (true/false)?
    pollingInterval: 10000        # How often (milliseconds) should session data be polled?
    virtualProxies:               # What virtual proxies should we get sessions for?
      -             # Get sessions for the default virtual proxy (i.e. /)
      - finance     # Get sessions for "finance" virtual proxy (/finance)
      - loadtest    # Get sessions for "loadtest" virtual proxy (/loadtest)

  serversToMonitor:
    pollingInterval: 30000        # How often (milliseconds) should the healthcheck API be polled?
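These YAML files are read through the config package listed in package.json. By convention that package picks up config/<NODE_ENV>.yaml, so starting the service from ./src with NODE_ENV=production should resolve src/config/production.yaml; the file name and environment handling here are an assumption about how node-config normally behaves, not something this diff states. A minimal sketch of how the settings above are consumed:

// Assumes NODE_ENV=production and a working directory of ./src, so node-config
// loads ./src/config/production.yaml (YAML support comes via the js-yaml dependency).
const config = require('config');

const pollingInterval = config.get('Butler-SOS.serversToMonitor.pollingInterval'); // 15000 in the sample file

// Optional keys are probed with has() before get(), as logdb.js does for queryPeriod
let queryPeriod = '5 minutes';
if (config.has('Butler-SOS.logdb.queryPeriod')) {
    queryPeriod = config.get('Butler-SOS.logdb.queryPeriod'); // "3 minutes" in the sample file
}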
@@ -1,3 +1,5 @@
// Set up REST endpoint for Docker healthchecks

var httpHealth = require("http");

var optionsHealth = {
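The hunk above comes from the small script Docker runs to probe the healthcheck endpoint that butler-sos.js exposes on port 12398; the diff only shows its first lines. A minimal sketch of what such a probe typically looks like (everything beyond the host, port and path is an assumption, not taken from this commit):

// Hypothetical completion of the healthcheck probe, for illustration only
var httpHealth = require("http");

var optionsHealth = {
    host: "localhost",
    port: "12398",    // Same port that restServer.listen() uses in butler-sos.js
    path: "/",
    timeout: 2000
};

var requestHealth = httpHealth.request(optionsHealth, res => {
    // Exit code 0 tells Docker the container is healthy, 1 that it is not
    process.exit(res.statusCode == 200 ? 0 : 1);
});

requestHealth.on("error", () => process.exit(1));
requestHealth.end();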
@@ -63,7 +63,12 @@ getLoggingLevel = () => {
}

// Get info on what servers to monitor
var serverList = config.get("Butler-SOS.serversToMonitor.servers");
const serverList = config.get("Butler-SOS.serversToMonitor.servers");


// Get info on what virtual proxies to get session data for
const userSessionsServers = config.get("Butler-SOS.userSessions.servers");


// Set up connection pool for accessing Qlik Sense log db
const pgPool = new Pool({
@@ -100,7 +105,7 @@ if (config.has('Butler-SOS.serversToMonitor.serverTagsDefinition')) {
        logger.debug(`Setting up new Influx database: Found server tag : ${entry}`);

        tagValues.push(entry);
    })
});

}
@@ -233,5 +238,6 @@ module.exports = {
    influx,
    pgPool,
    appVersion,
    serverList
    serverList,
    userSessionsServers
};
src/lib/logdb.js (new file, 159 lines)
@@ -0,0 +1,159 @@
function setupLogDbTimer() {
    // Get query period from config file. If not specified there, use default value.
    var queryPeriod = '5 minutes';
    if (globals.config.has("Butler-SOS.logdb.queryPeriod")) {
        queryPeriod = globals.config.get("Butler-SOS.logdb.queryPeriod");
    }

    // Configure timer for getting log data from Postgres
    setInterval(function () {
        globals.logger.verbose("Event started: Query log db");


        // Create list of logging levels to include in query
        // extractErrors: true
        // extractWarnings: true
        // extractInfo: false

        let arrayincludeLogLevels = [];
        if (globals.config.get("Butler-SOS.logdb.extractErrors")) {
            arrayincludeLogLevels.push("'ERROR'");
        }
        if (globals.config.get("Butler-SOS.logdb.extractWarnings")) {
            arrayincludeLogLevels.push("'WARN'");
        }
        if (globals.config.get("Butler-SOS.logdb.extractInfo")) {
            arrayincludeLogLevels.push("'INFO'");
        }
        const includeLogLevels = arrayincludeLogLevels.join();

        // checkout a Postgres client from connection pool
        globals.pgPool.connect()
            .then(pgClient => {
                return pgClient
                    .query(
                        `select
                            id,
                            entry_timestamp as timestamp,
                            entry_level,
                            process_host,
                            process_name,
                            payload
                        from public.log_entries
                        where
                            entry_level in (${includeLogLevels}) and
                            (entry_timestamp > now() - INTERVAL '${queryPeriod}' )
                        order by
                            entry_timestamp desc
                        `
                    )
                    .then(res => {
                        pgClient.release();
                        globals.logger.debug('Log db query got a response.');

                        var rows = res.rows;
                        rows.forEach(function (row) {
                            globals.logger.silly(`Log db row: ${JSON.stringify(row)}`);

                            // Post to Influxdb (if enabled)
                            if (globals.config.get("Butler-SOS.influxdbConfig.enableInfluxdb")) {
                                globals.logger.silly("Posting log db data to Influxdb...");

                                // Make sure that the payload message exists - storing it to Influx would otherwise throw an error
                                if (!row.payload.hasOwnProperty('Message')) {
                                    row.payload.Message = '';
                                }

                                // Get all tags for the current server.
                                // Some special logic is needed to match the host value returned from Postgres with the logDbHost property from
                                // the YAML config file.
                                // Once we have that match we can add all the tags for that server.
                                serverItem = globals.serverList.find(item => {
                                    globals.logger.silly(`Matching logdb host "${row.process_host}" against config file logDbHost "${item.logDbHost}"`);
                                    return item.logDbHost == row.process_host;
                                });

                                // If data row returned from log db is about a server which is not defined in the YAML config file, we need to be careful:
                                // Simple solution: only store data into Influxdb for servers defined in YAML config file.

                                if (serverItem == undefined) {
                                    group = '<no group>';
                                    srvName = '<no server>';
                                    srvDesc = '<no description>';
                                } else {
                                    group = serverItem.serverTags.serverGroup;
                                    srvName = serverItem.serverName;
                                    srvDesc = serverItem.serverDescription;
                                };

                                let tagsForDbEntry = {
                                    host: row.process_host,
                                    server_name: srvName,
                                    server_description: srvDesc,
                                    source_process: row.process_name,
                                    log_level: row.entry_level
                                };

                                // Add all tags defined for this server in the config file
                                if (serverItem.hasOwnProperty('serverTags')) {
                                    // Loop over all tags defined for the current server, adding them to the data structure that will later be passed to Influxdb
                                    Object.entries(serverItem.serverTags).forEach(entry => {
                                        tagsForDbEntry = Object.assign(tagsForDbEntry, {
                                            [entry[0]]: entry[1]
                                        });
                                    })

                                    globals.logger.debug(`Tags passed to Influxdb as part of logdb record: ${JSON.stringify(tagsForDbEntry)}`);
                                }

                                // Write the whole reading to Influxdb
                                globals.influx
                                    .writePoints([{
                                        measurement: "log_event",
                                        tags: tagsForDbEntry,
                                        fields: {
                                            message: row.payload.Message
                                        },
                                        timestamp: row.timestamp
                                    }])
                                    .then(err => {
                                        globals.logger.silly('Sent log db event to Influxdb');
                                    })
                                    .catch(err => {
                                        console.error(`Error saving log event to InfluxDB! ${err.stack}`);
                                        console.error(` Full error: ${JSON.stringify(err)}`);
                                    })
                            }

                            // Post to MQTT (if enabled)
                            if (globals.config.get("Butler-SOS.mqttConfig.enableMQTT")) {
                                globals.logger.silly("Posting log db data to MQTT...");
                                postLogDbToMQTT(
                                    row.process_host,
                                    row.process_name,
                                    row.entry_level,
                                    row.payload.Message,
                                    row.timestamp
                                );
                            }
                        });
                    })
                    .then(res => {
                        globals.logger.verbose("Sent log event to Influxdb");
                    })
                    .catch(err => {
                        globals.logger.error(`Log db query error: ${err.stack}`);
                        // pgClient.release();
                    });
            })
            .catch(err => {
                globals.logger.error(`ERROR: Could not connect to Postgres log db: ${err.stack}`);
            });
    }, globals.config.get("Butler-SOS.logdb.pollingInterval"));

}


module.exports = {
    setupLogDbTimer
};
src/lib/mainmetrics.js (new file, 111 lines)
@@ -0,0 +1,111 @@
// Get metrics from the Sense health check API

var request = require('request');
const globals = require('../globals');
const postToInfluxdb = require('./post-to-influxdb');
const postToMQTT = require('./post-to-mqtt');

var fs = require("fs");
var path = require("path"),
    certFile = path.resolve(__dirname, globals.config.get("Butler-SOS.cert.clientCert")),
    keyFile = path.resolve(__dirname, globals.config.get("Butler-SOS.cert.clientCertKey")),
    caFile = path.resolve(__dirname, globals.config.get("Butler-SOS.cert.clientCertCA"));


function setupMainMetricsTimer() {

    // Configure timer for getting healthcheck data
    setInterval(function () {
        globals.logger.verbose("Event started: Statistics collection");

        globals.serverList.forEach(function (server) {
            globals.logger.verbose(`Getting stats for server: ${server.serverName}`);

            globals.logger.debug(JSON.stringify(server));

            var tags = {
                host: server.host,
                server_name: server.serverName,
                server_description: server.serverDescription
            };
            // Check if there are any extra tags for this server that should be sent to InfluxDB
            if (server.hasOwnProperty('serverTags')) {

                // Loop over all tags defined for the current server, adding them to the data structure that will later be passed to Influxdb
                Object.entries(server.serverTags).forEach(entry => {
                    globals.logger.debug(`Found server tag: ${JSON.stringify(entry)}`);

                    tags = Object.assign(tags, {
                        [entry[0]]: entry[1]
                    });
                })

                globals.logger.debug(`All tags: ${JSON.stringify(tags)}`);
            }
            globals.logger.debug(`Complete list of tags for server ${server.serverName}: ${JSON.stringify(tags)}`);

            getHealthStatsFromSense(server.host, tags);
        });
    }, globals.config.get("Butler-SOS.serversToMonitor.pollingInterval"));
}


function getHealthStatsFromSense(host, influxTags) {
    globals.logger.debug(
        "URL=" + "https://" + host + "/engine/healthcheck/"
    );

    request({
        followRedirect: true,
        url: "https://" + host + "/engine/healthcheck/",
        method: 'GET',
        headers: {
            "Cache-Control": "no-cache",
            "Content-Type": "application/json"
        },
        json: true,
        cert: fs.readFileSync(certFile),
        key: fs.readFileSync(keyFile),
        ca: fs.readFileSync(caFile),
        rejectUnauthorized: false,
        requestCert: true,
        agent: false
    },
    function (error, response, body) {
        // Check for error
        if (error) {
            globals.logger.error(`Error when calling health check API: ${error}`);
            globals.logger.error(`Response: ${response}`);
            globals.logger.error(`Body: ${body}`);
            return;
        }

        if (!error && response.statusCode === 200) {
            globals.logger.verbose("Received ok response from " + influxTags.host);
            globals.logger.debug(JSON.stringify(body));

            // Post to MQTT (if enabled)
            if (globals.config.get("Butler-SOS.mqttConfig.enableMQTT")) {
                globals.logger.debug("Calling MQTT posting method");
                postToMQTT.postHealthToMQTT(host, influxTags.host, body);
            }

            // Post to Influxdb (if enabled)
            if (globals.config.get("Butler-SOS.influxdbConfig.enableInfluxdb")) {
                globals.logger.debug("Calling Influxdb posting method");
                postToInfluxdb.postToInfluxdb(host, body, influxTags);
            }
        }
    }
    );
}


module.exports = {
    setupMainMetricsTimer,
    getHealthStatsFromSense
};
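For reference, the body handed to the request callback above is the JSON document returned by the engine healthcheck API. The sketch below shows only the fields the code actually reads; the values are made up, and the exact format of started is inferred from the substring offsets used when parsing it:

// Illustrative shape of the healthcheck response, as consumed by postHealthToMQTT/postToInfluxdb
var exampleHealthcheckBody = {
    version: "12.5.0",
    started: "20190401T121212.000Z",  // format assumed from the substring-based parsing in post-to-influxdb.js
    mem: { committed: 1024, allocated: 800, free: 2048 },  // note: post-to-influxdb.js reads the misspelled body.mem.comitted
    cpu: { total: 12 },
    session: { active: 2, total: 5 },
    apps: {
        active_docs: [], loaded_docs: [], in_memory_docs: [],
        calls: 1234, selections: 56
    },
    users: { active: 2, total: 7 },
    cache: { hits: 10, lookups: 12, added: 1, replaced: 0, bytes_added: 4096 },
    saturated: false
};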
src/lib/post-to-influxdb.js (new file, 140 lines)
@@ -0,0 +1,140 @@
const globals = require('../globals');


function postToInfluxdb(host, body, influxTags) {
    // Calculate server uptime

    var dateTime = Date.now();
    var timestamp = Math.floor(dateTime);

    var str = body.started;
    var year = str.substring(0, 4);
    var month = str.substring(4, 6);
    var day = str.substring(6, 8);
    var hour = str.substring(9, 11);
    var minute = str.substring(11, 13);
    var second = str.substring(13, 15);
    var dateTimeStarted = new Date(year, month - 1, day, hour, minute, second);
    var timestampStarted = Math.floor(dateTimeStarted);

    var diff = timestamp - timestampStarted;

    // Create a new JavaScript Date object based on the timestamp
    // multiplied by 1000 so that the argument is in milliseconds, not seconds.
    var date = new Date(diff);

    var days = Math.trunc(diff / (1000 * 60 * 60 * 24));

    // Hours part from the timestamp
    var hours = date.getHours();

    // Minutes part from the timestamp
    var minutes = "0" + date.getMinutes();

    // Seconds part from the timestamp
    var seconds = "0" + date.getSeconds();

    // Will display time in 10:30:23 format
    var formattedTime =
        days +
        " days, " +
        hours +
        "h " +
        minutes.substr(-2) +
        "m " +
        seconds.substr(-2) +
        "s";

    // Build tags structure that will be passed to InfluxDB
    globals.logger.debug(`Tags sent to InfluxDB: ${JSON.stringify(influxTags)}`);

    // Write the whole reading to Influxdb
    globals.influx
        .writePoints([{
            measurement: "sense_server",
            tags: influxTags,
            fields: {
                version: body.version,
                started: body.started,
                uptime: formattedTime
            }
        },
        {
            measurement: "mem",
            tags: influxTags,
            fields: {
                comitted: body.mem.comitted,
                allocated: body.mem.allocated,
                free: body.mem.free
            }
        },
        {
            measurement: "apps",
            tags: influxTags,
            fields: {
                active_docs_count: body.apps.active_docs.length,
                loaded_docs_count: body.apps.loaded_docs.length,
                in_memory_docs_count: body.apps.in_memory_docs.length,
                active_docs: (globals.config.get("Butler-SOS.influxdbConfig.includeFields.activeDocs") ? body.apps.active_docs : ''),
                loaded_docs: (globals.config.get("Butler-SOS.influxdbConfig.includeFields.loadedDocs") ? body.apps.loaded_docs : ''),
                in_memory_docs: (globals.config.get("Butler-SOS.influxdbConfig.includeFields.inMemoryDocs") ? body.apps.in_memory_docs : ''),
                calls: body.apps.calls,
                selections: body.apps.selections
            }
        },
        {
            measurement: "cpu",
            tags: influxTags,
            fields: {
                total: body.cpu.total
            }
        },
        {
            measurement: "session",
            tags: influxTags,
            fields: {
                active: body.session.active,
                total: body.session.total
            }
        },
        {
            measurement: "users",
            tags: influxTags,
            fields: {
                active: body.users.active,
                total: body.users.total
            }
        },
        {
            measurement: "cache",
            tags: influxTags,
            fields: {
                hits: body.cache.hits,
                lookups: body.cache.lookups,
                added: body.cache.added,
                replaced: body.cache.replaced,
                bytes_added: body.cache.bytes_added
            }
        },
        {
            measurement: "saturated",
            tags: influxTags,
            fields: {
                saturated: body.saturated
            }
        }
        ])
        .then(() => {
            globals.logger.verbose(`Sent health data to Influxdb for server ${influxTags.server_name}`);
        })

        .catch(err => {
            console.error(`Error saving health data to InfluxDB! ${err.stack}`);
        });
}


module.exports = {
    postToInfluxdb
};
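Each call above writes one point per measurement (sense_server, mem, apps, cpu, session, users, cache, saturated), all carrying the same influxTags. A small sketch of reading one of them back through the shared influx client that globals.js exposes; the query itself is only an example, not something this commit contains:

// Example: fetch the latest CPU reading per server, using the shared client from globals.js
const globals = require('../globals');

globals.influx
    .query(`select last(total) from cpu group by server_name`)
    .then(rows => {
        rows.forEach(row => console.log(row));
    })
    .catch(err => {
        console.error(`Error querying InfluxDB: ${err.stack}`);
    });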
src/lib/post-to-mqtt.js (new file, 124 lines)
@@ -0,0 +1,124 @@
const globals = require("../globals");



function postLogDbToMQTT(
    process_host,
    process_name,
    entry_level,
    message,
    timestamp
) {
    // Get base MQTT topic
    var baseTopic = globals.config.get("Butler-SOS.mqttConfig.baseTopic");

    // Send to MQTT
    globals.mqttClient.publish(
        baseTopic + process_host + "/" + process_name + "/" + entry_level,
        message
    );
}

function postHealthToMQTT(host, serverName, body) {
    // Get base MQTT topic
    var baseTopic = globals.config.get("Butler-SOS.mqttConfig.baseTopic");

    // Send to MQTT
    globals.mqttClient.publish(baseTopic + serverName + "/version", body.version);
    globals.mqttClient.publish(baseTopic + serverName + "/started", body.started);
    globals.mqttClient.publish(
        baseTopic + serverName + "/mem/comitted",
        body.mem.committed.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/mem/allocated",
        body.mem.allocated.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/mem/free",
        body.mem.free.toString()
    );

    globals.mqttClient.publish(
        baseTopic + serverName + "/cpu/total",
        body.cpu.total.toString()
    );

    globals.mqttClient.publish(
        baseTopic + serverName + "/session/active",
        body.session.active.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/session/total",
        body.session.total.toString()
    );

    globals.mqttClient.publish(
        baseTopic + serverName + "/apps/active_docs",
        body.apps.active_docs.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/apps/loaded_docs",
        body.apps.loaded_docs.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/apps/in_memory_docs",
        body.apps.in_memory_docs.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/apps/calls",
        body.apps.calls.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/apps/selections",
        body.apps.selections.toString()
    );

    globals.mqttClient.publish(
        baseTopic + serverName + "/users/active",
        body.users.active.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/users/total",
        body.users.total.toString()
    );

    globals.mqttClient.publish(
        baseTopic + serverName + "/cache/hits",
        body.cache.hits.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/cache/lookups",
        body.cache.lookups.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/cache/added",
        body.cache.added.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/cache/replaced",
        body.cache.replaced.toString()
    );
    globals.mqttClient.publish(
        baseTopic + serverName + "/cache/bytes_added",
        body.cache.bytes_added.toString()
    );
    if (body.cache.lookups > 0) {
        globals.mqttClient.publish(
            baseTopic + serverName + "/cache/hit_ratio",
            Math.floor(body.cache.hits / body.cache.lookups * 100).toString()
        );
    }

    globals.mqttClient.publish(
        baseTopic + serverName + "/saturated",
        body.saturated.toString()
    );
}



module.exports = {
    postLogDbToMQTT,
    postHealthToMQTT
};
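The functions above publish one value per topic under the configured base topic, so a health reading for server sense1 ends up as butler-sos/sense1/cpu/total, butler-sos/sense1/session/active and so on. A small sketch of consuming those topics with the mqtt package already listed in package.json; the broker address is taken from the sample production.yaml and is otherwise just an example:

// Example consumer of the topics published by postHealthToMQTT/postLogDbToMQTT
var mqtt = require('mqtt');

var client = mqtt.connect('mqtt://192.168.100.20:1883');

client.on('connect', function () {
    client.subscribe('butler-sos/#');
});

client.on('message', function (topic, message) {
    // e.g. "butler-sos/sense1/cpu/total 12"
    console.log(`${topic} ${message.toString()}`);
});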
src/lib/sessions.js (new file, 23 lines)
@@ -0,0 +1,23 @@
// Get info on what sessions currently exist
function setupSessionsTimer() {
    globals.logger.debug(`Monitor sessions for virtual proxies: ${JSON.stringify(globals.virtualProxyList, null, 2)}`);

    // Configure timer for getting log data from Postgres
    setInterval(function () {
        globals.logger.verbose("Event started: Poll user sessions");

        globals.userSessionsServers.forEach(function (server) {
            globals.logger.verbose(`Getting sessions for ${JSON.stringify(server, null, 2)}`);

            getSessionStatsFromSense(server);
        });

    }, globals.config.get("Butler-SOS.userSessions.pollingInterval"));
}



module.exports = {
    setupSessionsTimer,
    getSessionsFromSense
};
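sessions.js is committed as a stub: it is not yet required from butler-sos.js, it does not import globals, and neither getSessionStatsFromSense nor the exported getSessionsFromSense exists in this commit. Purely as an assumption of where this is heading, a rough sketch of the missing function; the proxy API URL and response handling are placeholders and follow the cert-handling pattern used in mainmetrics.js, nothing more:

// Hypothetical sketch, not part of this commit
const globals = require('../globals');
var request = require('request');
var fs = require('fs');
var path = require('path');

function getSessionStatsFromSense(server) {
    // server comes from Butler-SOS.userSessions.servers in the YAML config, e.g.
    // { host: 'pro2-win1.ptarmiganlabs.net:4243', virtualProxy: '/hdr' }
    // The session endpoint below is a placeholder; the real proxy API path is not defined by this commit.
    request({
        url: 'https://' + server.host + (server.virtualProxy || '') + '/session',
        json: true,
        cert: fs.readFileSync(path.resolve(__dirname, globals.config.get('Butler-SOS.cert.clientCert'))),
        key: fs.readFileSync(path.resolve(__dirname, globals.config.get('Butler-SOS.cert.clientCertKey'))),
        ca: fs.readFileSync(path.resolve(__dirname, globals.config.get('Butler-SOS.cert.clientCertCA'))),
        rejectUnauthorized: false
    }, function (error, response, body) {
        if (error || response.statusCode !== 200) {
            globals.logger.error(`Error getting sessions from ${server.host}: ${error}`);
            return;
        }
        globals.logger.verbose(`Session data from ${server.host}: ${JSON.stringify(body)}`);
    });
}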