Compare commits
3 Commits
developmen ... test/incMe

| Author | SHA1 | Date |
| --- | --- | --- |
| philipyoo | fd05bc93af | |
| philipyoo | 62d4a0d238 | |
| philipyoo | 572f3c7c23 | |
@@ -4,7 +4,8 @@ const errors = require('../../lib/errors');
 const RedisClient = require('../../lib/metrics/RedisClient');
 const StatsModel = require('../../lib/metrics/StatsModel');
 const INTERVAL = 300; // 5 minutes
-const EXPIRY = 900; // 15 minutes
+const EXPIRY = 86400; // 24 hours
+const THROUGHPUT_EXPIRY = 900; // 15 minutes
 const OBJECT_MONITORING_EXPIRY = 86400; // 24 hours.

 class Metrics {
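For reference (not part of the diff): with these constants, the 5-minute Redis buckets now cover a 24-hour window for backlog, completions, and failures, while throughput keeps reading only the most recent 900 seconds. A minimal sketch of the arithmetic, assuming only the constants above:

```js
// Not part of the diff: how many 5-minute Redis buckets each window implies.
const INTERVAL = 300;          // seconds per bucket
const EXPIRY = 86400;          // 24-hour metrics window
const THROUGHPUT_EXPIRY = 900; // 15-minute throughput window

console.log(EXPIRY / INTERVAL);            // 288 buckets for backlog/completions/failures
console.log(THROUGHPUT_EXPIRY / INTERVAL); // 3 buckets for throughput
```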
@@ -14,8 +15,7 @@ class Metrics {
 this._redisClient = new RedisClient(redisConfig, this._logger);
-// Redis expiry increased by an additional interval so we can reference
-// the immediate older data for average throughput calculation
-this._statsClient = new StatsModel(this._redisClient, INTERVAL,
-    (EXPIRY + INTERVAL));
+this._statsClient = new StatsModel(this._redisClient, INTERVAL, EXPIRY);
 this._validSites = validSites;
 this._internalStart = internalStart;
 }
@@ -96,6 +96,16 @@ class Metrics {
 return cb(null, data);
 }

+/**
+ * Uptime of server based on this._internalStart up to max of expiry
+ * @param {number} expiry - max expiry
+ * @return {number} uptime of server up to expiry time
+ */
+_getMaxUptime(expiry) {
+    const timeSinceStart = (Date.now() - this._internalStart) / 1000;
+    return timeSinceStart < expiry ? (timeSinceStart || 1) : expiry;
+}
+
 /**
  * Get replication backlog in ops count and size in bytes
  * @param {object} details - route details from lib/backbeat/routes.js
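The new `_getMaxUptime` helper centralizes the capping logic that the hunks below now call instead of repeating inline. A standalone sketch of its behaviour (not part of the diff), assuming `internalStart` is a millisecond timestamp as in the tests:

```js
// Not part of the diff: standalone version of the capping behaviour.
function getMaxUptime(internalStart, expiry) {
    const timeSinceStart = (Date.now() - internalStart) / 1000;
    // never return 0 (avoids divide-by-zero later), never exceed expiry
    return timeSinceStart < expiry ? (timeSinceStart || 1) : expiry;
}

// e.g. a process started 10 minutes ago:
// getMaxUptime(Date.now() - 600000, 86400) ≈ 600
// a process started a week ago, capped at the 24h window:
// getMaxUptime(Date.now() - 7 * 86400000, 86400) === 86400
```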
@@ -119,13 +129,16 @@ class Metrics {
 });
 return cb(errors.InternalError);
 }
-const d = res.map(r => (
-    r.requests.slice(0, 3).reduce((acc, i) => acc + i, 0)
+const uptime = this._getMaxUptime(EXPIRY);
+const numOfIntervals = Math.ceil(uptime / INTERVAL);
+const [ops, opsDone, bytes, bytesDone] = res.map(r => (
+    r.requests.slice(0, numOfIntervals).reduce((acc, i) =>
+        acc + i, 0)
 ));

-let opsBacklog = d[0] - d[1];
+let opsBacklog = ops - opsDone;
 if (opsBacklog < 0) opsBacklog = 0;
-let bytesBacklog = d[2] - d[3];
+let bytesBacklog = bytes - bytesDone;
 if (bytesBacklog < 0) bytesBacklog = 0;
 const response = {
     backlog: {
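For illustration only (hypothetical values, not from the diff): the backlog is now the difference of uptime-bounded sums rather than a fixed three-bucket slice, clamped at zero as in the hunk above.

```js
// Not part of the diff: worked example with made-up Redis results,
// assuming a server up for roughly 20 minutes.
const INTERVAL = 300;
const uptime = 1200;                                 // from _getMaxUptime(EXPIRY)
const numOfIntervals = Math.ceil(uptime / INTERVAL); // 4 buckets

// hypothetical res: [ops queued, ops done, bytes queued, bytes done]
const res = [
    { requests: [5, 3, 2, 1, 9] },
    { requests: [4, 3, 2, 0, 9] },
    { requests: [500, 300, 200, 100, 900] },
    { requests: [400, 300, 200, 0, 900] },
];
const [ops, opsDone, bytes, bytesDone] = res.map(r =>
    r.requests.slice(0, numOfIntervals).reduce((acc, i) => acc + i, 0));

// Math.max mirrors the `if (... < 0) ... = 0` clamp in the diff
console.log(Math.max(ops - opsDone, 0));     // 2   -> ops backlog
console.log(Math.max(bytes - bytesDone, 0)); // 200 -> bytes backlog
```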
@@ -166,13 +179,9 @@ class Metrics {
 return cb(errors.InternalError);
 }

-// Find if time since start is less than EXPIRY time
-const timeSinceStart = (Date.now() - this._internalStart) / 1000;
-const timeDisplay = timeSinceStart < EXPIRY ?
-    timeSinceStart : EXPIRY;
-const numOfIntervals = Math.ceil(timeDisplay / INTERVAL);
-
-const d = res.map(r => (
+const uptime = this._getMaxUptime(EXPIRY);
+const numOfIntervals = Math.ceil(uptime / INTERVAL);
+const [opsDone, bytesDone] = res.map(r => (
     r.requests.slice(0, numOfIntervals).reduce((acc, i) =>
         acc + i, 0)
 ));
@@ -181,10 +190,10 @@ class Metrics {
 completions: {
     description: 'Number of completed replication operations ' +
         '(count) and number of bytes transferred (size) in ' +
-        `the last ${Math.floor(timeDisplay)} seconds`,
+        `the last ${Math.floor(uptime)} seconds`,
     results: {
-        count: d[0],
-        size: d[1],
+        count: opsDone,
+        size: bytesDone,
     },
 },
 };
@@ -216,13 +225,9 @@ class Metrics {
 return cb(errors.InternalError);
 }

-// Find if time since start is less than EXPIRY time
-const timeSinceStart = (Date.now() - this._internalStart) / 1000;
-const timeDisplay = timeSinceStart < EXPIRY ?
-    timeSinceStart : EXPIRY;
-const numOfIntervals = Math.ceil(timeDisplay / INTERVAL);
-
-const d = res.map(r => (
+const uptime = this._getMaxUptime(EXPIRY);
+const numOfIntervals = Math.ceil(uptime / INTERVAL);
+const [opsFail, bytesFail] = res.map(r => (
     r.requests.slice(0, numOfIntervals).reduce((acc, i) =>
         acc + i, 0)
 ));
@@ -231,10 +236,10 @@ class Metrics {
 failures: {
     description: 'Number of failed replication operations ' +
         '(count) and bytes (size) in the last ' +
-        `${Math.floor(timeDisplay)} seconds`,
+        `${Math.floor(uptime)} seconds`,
     results: {
-        count: d[0],
-        size: d[1],
+        count: opsFail,
+        size: bytesFail,
     },
 },
 };
@@ -243,7 +248,7 @@ class Metrics {
 }

 /**
- * Get current throughput in ops/sec and bytes/sec
+ * Get current throughput in ops/sec and bytes/sec up to max of 15 minutes
  * Throughput is the number of units processed in a given time
  * @param {object} details - route details from lib/backbeat/routes.js
  * @param {function} cb - callback(error, data)
@@ -266,42 +271,37 @@ class Metrics {
 });
 return cb(errors.InternalError);
 }

 const now = new Date();
-const timeSinceStart = (now - this._internalStart) / 1000;
-// Seconds up to a max of EXPIRY seconds
-const timeDisplay = timeSinceStart < EXPIRY ?
-    (timeSinceStart || 1) : EXPIRY;
-const numOfIntervals = Math.ceil(timeDisplay / INTERVAL);
+const uptime = this._getMaxUptime(THROUGHPUT_EXPIRY);
+const numOfIntervals = Math.ceil(uptime / INTERVAL);

 const [opsThroughput, bytesThroughput] = res.map(r => {
     let total = r.requests.slice(0, numOfIntervals).reduce(
         (acc, i) => acc + i, 0);

-    // if timeDisplay !== EXPIRY, use internal timer and do not
-    // include the extra 4th interval
-    if (timeDisplay === EXPIRY) {
+    // if timeDisplay !== THROUGHPUT_EXPIRY, use internal timer and
+    // do not include the extra 4th interval
+    if (uptime === THROUGHPUT_EXPIRY) {
         // all intervals apply, including 4th interval
         const lastInterval =
-            this._statsClient._normalizeTimestamp(new Date(now));
+            this._statsClient._normalizeTimestamp(now);
         // in seconds
         const diff = (now - lastInterval) / 1000;

         // Get average for last interval depending on time
         // surpassed so far for newest interval
         total += ((INTERVAL - diff) / INTERVAL) *
             r.requests[numOfIntervals];
     }

-    // Divide total by timeDisplay to determine data per second
-    return (total / timeDisplay);
+    // Divide total by uptime to determine data per second
+    return (total / uptime);
 });

 const response = {
     throughput: {
         description: 'Current throughput for replication ' +
             'operations in ops/sec (count) and bytes/sec (size) ' +
-            `in the last ${Math.floor(timeDisplay)} seconds`,
+            `in the last ${Math.floor(uptime)} seconds`,
         results: {
             count: opsThroughput.toFixed(2),
             size: bytesThroughput.toFixed(2),
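A numeric sketch (hypothetical values, not from the diff) of the partial-interval weighting above: once the process has been up for the full THROUGHPUT_EXPIRY, the sum over the three newest buckets is topped up with a fraction of the extra fourth bucket so the window always covers 900 seconds. The ordering here is an assumption: `requests[0]` is taken to be the newest bucket, as the slice in the diff suggests.

```js
// Not part of the diff: made-up bucket values to show the weighting formula.
const INTERVAL = 300;
const THROUGHPUT_EXPIRY = 900;
const numOfIntervals = Math.ceil(THROUGHPUT_EXPIRY / INTERVAL); // 3

// assumed ordering: newest bucket first, plus one extra older bucket at the end
const requests = [40, 90, 30, 60];
const diff = 120; // seconds elapsed so far in the newest bucket

let total = requests.slice(0, numOfIntervals).reduce((acc, i) => acc + i, 0); // 160
// top up with the not-yet-replaced fraction of the extra 4th bucket
total += ((INTERVAL - diff) / INTERVAL) * requests[numOfIntervals];           // +36

console.log((total / THROUGHPUT_EXPIRY).toFixed(2)); // "0.22" ops/sec
```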
@@ -336,20 +336,18 @@ class Metrics {
 return cb(errors.InternalError);
 }
 const now = new Date();
-const timeSinceStart = (now - this._internalStart) / 1000;
-// Seconds up to a max of EXPIRY seconds
-const timeDisplay = timeSinceStart < EXPIRY ?
-    (timeSinceStart || 1) : EXPIRY;
-const numOfIntervals = Math.ceil(timeDisplay / INTERVAL);
+const uptime = this._getMaxUptime(THROUGHPUT_EXPIRY);
+const numOfIntervals = Math.ceil(uptime / INTERVAL);

 const { requests } = res[0]; // Bytes done
 let total = requests.slice(0, numOfIntervals)
     .reduce((acc, i) => acc + i, 0);
-// if timeDisplay !== OBJECT_MONITORING_EXPIRY, use internal timer
+// if timeDisplay !== THROUGHPUT_EXPIRY, use internal timer
 // and do not include the extra 4th interval
-if (timeDisplay === EXPIRY) {
+if (uptime === THROUGHPUT_EXPIRY) {
     // all intervals apply, including 4th interval
     const lastInterval =
-        this._statsClient._normalizeTimestamp(new Date(now));
+        this._statsClient._normalizeTimestamp(now);
     // in seconds
     const diff = (now - lastInterval) / 1000;
     // Get average for last interval depending on time passed so
@@ -361,7 +359,7 @@ class Metrics {
 const response = {
     description: 'Current throughput for object replication in ' +
         'bytes/sec (throughput)',
-    throughput: (total / timeDisplay).toFixed(2),
+    throughput: (total / uptime).toFixed(2),
 };
 return cb(null, response);
 });
@@ -392,10 +390,8 @@ class Metrics {
 }
-// Find if time since start is less than OBJECT_MONITORING_EXPIRY
-// time
-const timeSinceStart = (Date.now() - this._internalStart) / 1000;
-const timeDisplay = timeSinceStart < OBJECT_MONITORING_EXPIRY ?
-    timeSinceStart : OBJECT_MONITORING_EXPIRY;
-const numOfIntervals = Math.ceil(timeDisplay / INTERVAL);
+const uptime = this._getMaxUptime(OBJECT_MONITORING_EXPIRY);
+const numOfIntervals = Math.ceil(uptime / INTERVAL);
 const [totalBytesToComplete, bytesComplete] = res.map(r => (
     r.requests.slice(0, numOfIntervals).reduce((acc, i) =>
         acc + i, 0)
@@ -5,6 +5,10 @@ const assert = require('assert');
 const RedisClient = require('../../../lib/metrics/RedisClient');
 const { backbeat } = require('../../../');

+// expirations
+const EXPIRY = 86400; // 24 hours
+const THROUGHPUT_EXPIRY = 900; // 15 minutes
+
 // setup redis client
 const config = {
     host: '127.0.0.1',
@@ -22,7 +26,7 @@ const sites = ['site1', 'site2'];
 const metrics = new backbeat.Metrics({
     redisConfig: config,
     validSites: ['site1', 'site2', 'all'],
-    internalStart: Date.now() - 900000, // 15 minutes ago.
+    internalStart: Date.now() - (EXPIRY * 1000), // 24 hours ago.
 }, fakeLogger);

 // Since many methods were overwritten, these tests should validate the changes
@@ -57,7 +61,7 @@ describe('Metrics class', () => {
 completions: {
     description: 'Number of completed replication operations' +
         ' (count) and number of bytes transferred (size) in ' +
-        'the last 900 seconds',
+        `the last ${EXPIRY} seconds`,
     results: {
         count: 0,
         size: 0,
@@ -65,7 +69,8 @@ describe('Metrics class', () => {
 },
 failures: {
     description: 'Number of failed replication operations ' +
-        '(count) and bytes (size) in the last 900 seconds',
+        `(count) and bytes (size) in the last ${EXPIRY} ` +
+        'seconds',
     results: {
         count: 0,
         size: 0,
@@ -74,7 +79,7 @@ describe('Metrics class', () => {
 throughput: {
     description: 'Current throughput for replication' +
         ' operations in ops/sec (count) and bytes/sec (size) ' +
-        'in the last 900 seconds',
+        `in the last ${THROUGHPUT_EXPIRY} seconds`,
     results: {
         count: '0.00',
         size: '0.00',
@@ -20,9 +20,14 @@ const redisClient = new RedisClient(config, fakeLogger);

 // setup stats model
 const STATS_INTERVAL = 300; // 5 minutes
-const STATS_EXPIRY = 900; // 15 minutes
+const STATS_EXPIRY = 86400; // 24 hours
 const statsModel = new StatsModel(redisClient, STATS_INTERVAL, STATS_EXPIRY);

+function setExpectedStats(expected) {
+    return expected.concat(
+        Array((STATS_EXPIRY / STATS_INTERVAL) - expected.length).fill(0));
+}
+
 // Since many methods were overwritten, these tests should validate the changes
 // made to the original methods
 describe('StatsModel class', () => {
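The padded expectation arrays in the test hunks below come from the `setExpectedStats` helper added above. Run standalone (not part of the diff), it pads the expected values out to the full 24-hour window of 5-minute buckets:

```js
// Not part of the diff: what the test helper produces with the new window.
const STATS_INTERVAL = 300; // 5 minutes
const STATS_EXPIRY = 86400; // 24 hours -> 288 buckets

function setExpectedStats(expected) {
    return expected.concat(
        Array((STATS_EXPIRY / STATS_INTERVAL) - expected.length).fill(0));
}

console.log(setExpectedStats([1, 2]).length);      // 288
console.log(setExpectedStats([1, 2]).slice(0, 4)); // [ 1, 2, 0, 0 ]
```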
@@ -65,7 +70,7 @@ describe('StatsModel class', () => {
     [null, '2'],
     [null, null],
 ]);
-assert.deepStrictEqual(res, [1, 2, 0]);
+assert.deepStrictEqual(res, setExpectedStats([1, 2]));
 });

 it('should correctly record a new request by default one increment',
@@ -101,7 +106,7 @@ describe('StatsModel class', () => {
 statsModel.getStats(fakeLogger, id, (err, res) => {
     assert.ifError(err);

-    assert.deepStrictEqual(res.requests, [9, 0, 0]);
+    assert.deepStrictEqual(res.requests, setExpectedStats([9]));
     next();
 });
 },
@@ -110,7 +115,8 @@ describe('StatsModel class', () => {
 statsModel.getStats(fakeLogger, id, (err, res) => {
     assert.ifError(err);

-    assert.deepStrictEqual(res.requests, [10, 0, 0]);
+    assert.deepStrictEqual(res.requests,
+        setExpectedStats([10]));
     next();
 });
 },
@@ -119,7 +125,8 @@ describe('StatsModel class', () => {
 statsModel.getStats(fakeLogger, id, (err, res) => {
     assert.ifError(err);

-    assert.deepStrictEqual(res.requests, [11, 0, 0]);
+    assert.deepStrictEqual(res.requests,
+        setExpectedStats([11]));
     next();
 });
 },
@@ -155,8 +162,8 @@ describe('StatsModel class', () => {
 assert.ifError(err);

 const expected = {
-    'requests': [1, 0, 0],
-    '500s': [1, 0, 0],
+    'requests': setExpectedStats([1]),
+    '500s': setExpectedStats([1]),
     'sampleDuration': STATS_EXPIRY,
 };
 assert.deepStrictEqual(res, expected);
@@ -172,8 +179,8 @@ describe('StatsModel class', () => {
 statsModel.getStats(fakeLogger, id, (err, res) => {
     assert.ifError(err);
     const expected = {
-        'requests': [0, 0, 0],
-        '500s': [0, 0, 0],
+        'requests': setExpectedStats([]),
+        '500s': setExpectedStats([]),
         'sampleDuration': STATS_EXPIRY,
     };
     assert.deepStrictEqual(res, expected);
@@ -184,8 +191,8 @@ describe('StatsModel class', () => {
 statsModel.getAllStats(fakeLogger, id, (err, res) => {
     assert.ifError(err);
     const expected = {
-        'requests': [0, 0, 0],
-        '500s': [0, 0, 0],
+        'requests': setExpectedStats([]),
+        '500s': setExpectedStats([]),
         'sampleDuration': STATS_EXPIRY,
     };
     assert.deepStrictEqual(res, expected);
@@ -200,10 +207,8 @@ describe('StatsModel class', () => {
 statsModel.getAllStats(fakeLogger, [], (err, res) => {
     assert.ifError(err);

-    const expected = Array(STATS_EXPIRY / STATS_INTERVAL).fill(0);
-
-    assert.deepStrictEqual(res.requests, expected);
-    assert.deepStrictEqual(res['500s'], expected);
+    assert.deepStrictEqual(res.requests, setExpectedStats([]));
+    assert.deepStrictEqual(res['500s'], setExpectedStats([]));
     done();
 });
 });
@@ -231,7 +236,8 @@ describe('StatsModel class', () => {
 assert.ifError(err);

 assert.equal(res.requests[0], 14);
-assert.deepStrictEqual(res.requests, [14, 0, 0]);
+assert.deepStrictEqual(res.requests,
+    setExpectedStats([14]));
 next();
 });
 },