Compare commits

No commits in common. "e092c97db15c477c18e2f12754204533e213e102" and "7290208a208b34c3791277e33e6e0c32564212c3" have entirely different histories.

e092c97db1 ... 7290208a20
@@ -387,10 +387,6 @@
         "code": 409,
         "description": "The request was rejected because it attempted to create a resource that already exists."
     },
-    "KeyAlreadyExists": {
-        "code": 409,
-        "description": "The request was rejected because it attempted to create a resource that already exists."
-    },
     "ServiceFailure": {
         "code": 500,
         "description": "Server error: the request processing has failed because of an unknown error, exception or failure."
@@ -8,7 +8,7 @@ COPY buildbot_worker_packages.list arsenal_packages.list /tmp/
 RUN apt-get update -q && apt-get -qy install curl apt-transport-https \
     && apt-get install -qy software-properties-common python-software-properties \
    && curl --silent https://deb.nodesource.com/gpgkey/nodesource.gpg.key | apt-key add - \
-    && echo "deb https://deb.nodesource.com/node_8.x trusty main" > /etc/apt/sources.list.d/nodesource.list \
+    && echo "deb https://deb.nodesource.com/node_6.x trusty main" > /etc/apt/sources.list.d/nodesource.list \
    && add-apt-repository ppa:ubuntu-toolchain-r/test \
    && apt-get update -q \
    && cat /tmp/buildbot_worker_packages.list | xargs apt-get install -qy \
index.js (52 changed lines)

@@ -3,7 +3,6 @@ module.exports = {
     constants: require('./lib/constants'),
     db: require('./lib/db'),
     errors: require('./lib/errors.js'),
-    errorUtils: require('./lib/errorUtils'),
     shuffle: require('./lib/shuffle'),
     stringHash: require('./lib/stringHash'),
     ipCheck: require('./lib/ipCheck'),
@@ -13,7 +12,15 @@ module.exports = {
         dhparam: require('./lib/https/dh2048.js'),
     },
     algorithms: {
-        list: require('./lib/algos/list/exportAlgos'),
+        list: {
+            Basic: require('./lib/algos/list/basic').List,
+            Delimiter: require('./lib/algos/list/delimiter').Delimiter,
+            DelimiterVersions: require('./lib/algos/list/delimiterVersions')
+                .DelimiterVersions,
+            DelimiterMaster: require('./lib/algos/list/delimiterMaster')
+                .DelimiterMaster,
+            MPU: require('./lib/algos/list/MPU').MultipartUploads,
+        },
         listTools: {
             DelimiterTools: require('./lib/algos/list/tools'),
         },
@@ -46,10 +53,6 @@ module.exports = {
             RESTClient: require('./lib/network/rest/RESTClient'),
         },
         RoundRobin: require('./lib/network/RoundRobin'),
-        probe: {
-            HealthProbeServer:
-                require('./lib/network/probe/HealthProbeServer.js'),
-        },
     },
     s3routes: {
         routes: require('./lib/s3routes/routes'),
@@ -60,9 +63,6 @@ module.exports = {
         convertToXml: require('./lib/s3middleware/convertToXml'),
         escapeForXml: require('./lib/s3middleware/escapeForXml'),
         tagging: require('./lib/s3middleware/tagging'),
-        checkDateModifiedHeaders:
-            require('./lib/s3middleware/validateConditionalHeaders')
-                .checkDateModifiedHeaders,
         validateConditionalHeaders:
             require('./lib/s3middleware/validateConditionalHeaders')
                 .validateConditionalHeaders,
@@ -80,39 +80,12 @@ module.exports = {
     },
     storage: {
         metadata: {
-            MetadataWrapper: require('./lib/storage/metadata/MetadataWrapper'),
-            bucketclient: {
-                BucketClientInterface:
-                    require('./lib/storage/metadata/bucketclient/' +
-                        'BucketClientInterface'),
-                LogConsumer:
-                    require('./lib/storage/metadata/bucketclient/LogConsumer'),
-            },
-            file: {
-                BucketFileInterface:
-                    require('./lib/storage/metadata/file/BucketFileInterface'),
             MetadataFileServer:
                 require('./lib/storage/metadata/file/MetadataFileServer'),
             MetadataFileClient:
                 require('./lib/storage/metadata/file/MetadataFileClient'),
-            },
-            inMemory: {
-                metastore:
-                    require('./lib/storage/metadata/in_memory/metastore'),
-                metadata: require('./lib/storage/metadata/in_memory/metadata'),
-                bucketUtilities:
-                    require('./lib/storage/metadata/in_memory/bucket_utilities'),
-            },
-            mongoclient: {
-                MongoClientInterface:
-                    require('./lib/storage/metadata/mongoclient/' +
-                        'MongoClientInterface'),
             LogConsumer:
-                require('./lib/storage/metadata/mongoclient/LogConsumer'),
+                require('./lib/storage/metadata/bucketclient/LogConsumer'),
-            },
-            proxy: {
-                Server: require('./lib/storage/metadata/proxy/Server'),
-            },
         },
     },
     data: {
         file: {
@@ -135,14 +108,9 @@ module.exports = {
     },
     metrics: {
         StatsClient: require('./lib/metrics/StatsClient'),
-        StatsModel: require('./lib/metrics/StatsModel'),
         RedisClient: require('./lib/metrics/RedisClient'),
     },
     pensieve: {
         credentialUtils: require('./lib/executables/pensieveCreds/utils'),
     },
-    backbeat: {
-        Metrics: require('./lib/backbeat/Metrics'),
-        routes: require('./lib/backbeat/routes'),
-    },
 };
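For reference, the listing algorithms re-exported above are reached through the package's top-level export. A minimal sketch of that usage follows; the constructor options and the sample keys are assumptions for illustration, not taken from this diff.

const arsenal = require('arsenal');

// algorithms.list maps algorithm names to classes (see the hunk above).
const { Delimiter } = arsenal.algorithms.list;
const listing = new Delimiter({ delimiter: '/', prefix: 'photos/', maxKeys: 1000 });

// Each raw key/value record is offered to the algorithm through filter(),
// whose implementation appears in the delimiter hunks below.
listing.filter({ key: 'photos/2019/a.jpg', value: '{}' });
listing.filter({ key: 'photos/2019/b.jpg', value: '{}' });

// result() aggregates Contents and CommonPrefixes for the listing response.
console.log(listing.result());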
@@ -125,6 +125,9 @@ class Delimiter extends Extension {
      * @return {number} - indicates if iteration should continue
      */
     addContents(key, value) {
+        if (this._reachedMaxKeys()) {
+            return FILTER_END;
+        }
         this.Contents.push({ key, value });
         this.NextMarker = key;
         ++this.keys;
@@ -143,10 +146,6 @@ class Delimiter extends Extension {
      * @return {number} - indicates if iteration should continue
      */
     filter(obj) {
-        if (this._reachedMaxKeys()) {
-            return FILTER_END;
-        }
-
         const key = obj.key;
         const value = obj.value;
         if ((this.prefix && !key.startsWith(this.prefix))
@@ -178,6 +177,9 @@ class Delimiter extends Extension {
         const commonPrefix = getCommonPrefix(key, this.delimiter, index);
         if (this.CommonPrefixes.indexOf(commonPrefix) === -1
             && this.NextMarker !== commonPrefix) {
+            if (this._reachedMaxKeys()) {
+                return FILTER_END;
+            }
             this.CommonPrefixes.push(commonPrefix);
             this.NextMarker = commonPrefix;
             ++this.keys;
@@ -3,12 +3,7 @@
 const Delimiter = require('./delimiter').Delimiter;
 const Version = require('../../versioning/Version').Version;
 const VSConst = require('../../versioning/constants').VersioningConstants;
-const {
-    FILTER_ACCEPT,
-    FILTER_SKIP,
-    SKIP_NONE,
-    FILTER_END,
-} = require('./tools');
+const { FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } = require('./tools');
 
 const VID_SEP = VSConst.VersionId.Separator;
 
@@ -44,10 +39,6 @@ class DelimiterMaster extends Delimiter {
      * @return {number} - indicates if iteration should continue
      */
     filter(obj) {
-        if (this._reachedMaxKeys()) {
-            return FILTER_END;
-        }
-
         let key = obj.key;
         const value = obj.value;
 
@@ -72,6 +72,9 @@ class DelimiterVersions extends Delimiter {
      * @return {Boolean} - indicates if iteration should continue
      */
     addContents(obj) {
+        if (this._reachedMaxKeys()) {
+            return FILTER_END;
+        }
         this.Contents.push(obj);
         this.NextMarker = obj.key;
         this.NextVersionIdMarker = obj.versionId;
@@ -91,9 +94,6 @@ class DelimiterVersions extends Delimiter {
      * @return {number} - indicates if iteration should continue
      */
     filter(obj) {
-        if (this._reachedMaxKeys()) {
-            return FILTER_END;
-        }
         if (Version.isPHD(obj.value)) {
             return FILTER_ACCEPT; // trick repd to not increase its streak
         }
@@ -1,9 +0,0 @@
-module.exports = {
-    Basic: require('./basic').List,
-    Delimiter: require('./delimiter').Delimiter,
-    DelimiterVersions: require('./delimiterVersions')
-        .DelimiterVersions,
-    DelimiterMaster: require('./delimiterMaster')
-        .DelimiterMaster,
-    MPU: require('./MPU').MultipartUploads,
-};
@@ -1,88 +0,0 @@
-const assert = require('assert');
-
-const { FILTER_END, FILTER_SKIP, SKIP_NONE } = require('./tools');
-
-
-const MAX_STREAK_LENGTH = 100;
-
-/**
- * Handle the filtering and the skip mechanism of a listing result.
- */
-class Skip {
-    /**
-     * @param {Object} params - skip parameters
-     * @param {Object} params.extension - delimiter extension used (required)
-     * @param {String} params.gte - current range gte (greater than or
-     *                              equal) used by the client code
-     */
-    constructor(params) {
-        assert(params.extension);
-
-        this.extension = params.extension;
-        this.gteParams = params.gte;
-
-        this.listingEndCb = null;
-        this.skipRangeCb = null;
-
-        /* Used to count consecutive FILTER_SKIP returned by the extension
-         * filter method. Once this counter reaches MAX_STREAK_LENGTH, the
-         * filter function tries to skip unwanted values by defining a new
-         * range. */
-        this.streakLength = 0;
-    }
-
-    setListingEndCb(cb) {
-        this.listingEndCb = cb;
-    }
-
-    setSkipRangeCb(cb) {
-        this.skipRangeCb = cb;
-    }
-
-    /**
-     * Filter an entry.
-     * @param {Object} entry - entry to filter.
-     * @return {undefined}
-     *
-     * This function calls the listing end or the skip range callbacks if
-     * needed.
-     */
-    filter(entry) {
-        assert(this.listingEndCb);
-        assert(this.skipRangeCb);
-
-        const filteringResult = this.extension.filter(entry);
-        const skippingRange = this.extension.skipping();
-
-        if (filteringResult === FILTER_END) {
-            this.listingEndCb();
-        } else if (filteringResult === FILTER_SKIP
-                   && skippingRange !== SKIP_NONE) {
-            if (++this.streakLength >= MAX_STREAK_LENGTH) {
-                const newRange = this._inc(skippingRange);
-
-                /* Avoid to loop on the same range again and again. */
-                if (newRange === this.gteParams) {
-                    this.streakLength = 1;
-                } else {
-                    this.skipRangeCb(newRange);
-                }
-            }
-        } else {
-            this.streakLength = 0;
-        }
-    }
-
-    _inc(str) {
-        if (!str) {
-            return str;
-        }
-        const lastCharValue = str.charCodeAt(str.length - 1);
-        const lastCharNewValue = String.fromCharCode(lastCharValue + 1);
-
-        return `${str.slice(0, str.length - 1)}${lastCharNewValue}`;
-    }
-}
-
-
-module.exports = Skip;
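For context, the removed Skip helper above is meant to be driven by the loop that reads raw entries from the metadata backend. The wiring below is only an illustrative sketch: listingExtension, params, restartScanFrom and databaseStream are made-up names, not code from this repository.

const skip = new Skip({ extension: listingExtension, gte: params.gte });

let done = false;
skip.setListingEndCb(() => {
    // The extension returned FILTER_END: stop reading from the database.
    done = true;
});
skip.setSkipRangeCb(newGte => {
    // Too many consecutive FILTER_SKIPs: restart the scan at a higher key.
    restartScanFrom(newGte);
});

for (const entry of databaseStream) {
    if (done) {
        break;
    }
    skip.filter(entry);
}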
@@ -127,17 +127,6 @@ function check(request, log, data, awsService) {
         return { err: errors.RequestTimeTooSkewed };
     }
 
-    let proxyPath = null;
-    if (request.headers.proxy_path) {
-        try {
-            proxyPath = decodeURIComponent(request.headers.proxy_path);
-        } catch (err) {
-            log.debug('invalid proxy_path header', { proxyPath, err });
-            return { err: errors.InvalidArgument.customizeDescription(
-                'invalid proxy_path header') };
-        }
-    }
-
     const stringToSign = constructStringToSign({
         log,
         request,
@@ -147,7 +136,6 @@ function check(request, log, data, awsService) {
         timestamp,
         payloadChecksum,
         awsService: service,
-        proxyPath,
     });
     log.trace('constructed stringToSign', { stringToSign });
     if (stringToSign instanceof Error) {
@@ -62,17 +62,6 @@ function check(request, log, data) {
         return { err: errors.RequestTimeTooSkewed };
     }
 
-    let proxyPath = null;
-    if (request.headers.proxy_path) {
-        try {
-            proxyPath = decodeURIComponent(request.headers.proxy_path);
-        } catch (err) {
-            log.debug('invalid proxy_path header', { proxyPath });
-            return { err: errors.InvalidArgument.customizeDescription(
-                'invalid proxy_path header') };
-        }
-    }
-
     // In query v4 auth, the canonical request needs
     // to include the query params OTHER THAN
     // the signature so create a
@@ -98,7 +87,6 @@ function check(request, log, data) {
         credentialScope:
             `${scopeDate}/${region}/${service}/${requestType}`,
         awsService: service,
-        proxyPath,
     });
     if (stringToSign instanceof Error) {
         return { err: stringToSign };
@@ -1,551 +0,0 @@
(Entire file removed: the Backbeat Metrics class. It wrapped a RedisClient and a StatsModel with INTERVAL = 300 s, EXPIRY = 86400 s and THROUGHPUT_EXPIRY = 900 s, resolved per-site or per-object Redis keys through _queryStats, _hasGlobalKey, _getData and _getMaxUptime, and exposed getBacklog, getCompletions, getFailedMetrics, getThroughput, getObjectThroughput, getObjectProgress, getPending and getAllMetrics, each returning count/size, throughput or progress results with a description string, plus disconnect and listClients passthroughs to the Redis client.)
@@ -1,167 +0,0 @@
(Entire file removed: the Backbeat API route table. A routes(redisKeys, allLocations) function returned route descriptors (httpMethod, category, type, extensions, handler method and Redis dataPoints) for /_/healthcheck, /_/metrics/crr/<location>/pending, /_/metrics/crr/<location>/backlog, /_/metrics/crr/<location>/completions, /_/metrics/crr/<location>/failures, /_/metrics/crr/<location>/throughput, /_/metrics/crr/<location>/all, /_/metrics/crr/<site>/progress/<bucket>/<key>, /_/metrics/crr/<site>/throughput/<bucket>/<key>, /_/crr/failed?marker=<marker>, /_/crr/failed/<bucket>/<key>/<versionId>, /_/crr/failed (POST retry), /_/monitoring/metrics, /_/crr/pause/<location>, /_/crr/resume/<location> and /_/crr/resume/<location>/schedule (POST, DELETE and GET), and /_/crr/status/<location>.)
@@ -72,10 +72,4 @@ module.exports = {
     permittedCapitalizedBuckets: {
         METADATA: true,
     },
-    // Setting a lower object key limit to account for:
-    // - Mongo key limit of 1012 bytes
-    // - Version ID in Mongo Key if versioned of 33
-    // - Max bucket name length if bucket match false of 63
-    // - Extra prefix slash for bucket prefix if bucket match of 1
-    objectKeyByteLimit: 915,
 };
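The removed comment above derives the 915-byte key limit; spelled out as arithmetic:

// Mongo key limit, minus the version ID suffix, the maximum bucket name
// length used as a key prefix, and the extra prefix slash:
const objectKeyByteLimit = 1012 - 33 - 63 - 1; // === 915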
@@ -1,13 +0,0 @@
-function reshapeExceptionError(error) {
-    const { message, code, stack, name } = error;
-    return {
-        message,
-        code,
-        stack,
-        name,
-    };
-}
-
-module.exports = {
-    reshapeExceptionError,
-};
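For context, the removed reshapeExceptionError helper exists because Error instances serialize to an empty object under JSON.stringify; the plain object it returns keeps message, code, stack and name visible in structured logs. A small assumed usage sketch, where log stands in for a werelogs logger:

const { reshapeExceptionError } = require('arsenal').errorUtils;

try {
    JSON.parse('not json');
} catch (err) {
    // err itself would log as "{}"; the reshaped copy keeps its fields.
    log.error('parse failure', { error: reshapeExceptionError(err) });
}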
@@ -22,28 +22,6 @@ class RedisClient {
         return this;
     }
 
-    /**
-     * scan a pattern and return matching keys
-     * @param {string} pattern - string pattern to match with all existing keys
-     * @param {number} [count=10] - scan count
-     * @param {callback} cb - callback (error, result)
-     * @return {undefined}
-     */
-    scan(pattern, count = 10, cb) {
-        const params = { match: pattern, count };
-        const keys = [];
-
-        const stream = this._client.scanStream(params);
-        stream.on('data', resultKeys => {
-            for (let i = 0; i < resultKeys.length; i++) {
-                keys.push(resultKeys[i]);
-            }
-        });
-        stream.on('end', () => {
-            cb(null, keys);
-        });
-    }
-
     /**
      * increment value of a key by 1 and set a ttl
      * @param {string} key - key holding the value
@@ -57,38 +35,6 @@ class RedisClient {
             .exec(cb);
     }
 
-    /**
-     * increment value of a key by a given amount
-     * @param {string} key - key holding the value
-     * @param {number} amount - amount to increase by
-     * @param {callback} cb - callback
-     * @return {undefined}
-     */
-    incrby(key, amount, cb) {
-        return this._client.incrby(key, amount, cb);
-    }
-
-    /**
-     * decrement value of a key by a given amount
-     * @param {string} key - key holding the value
-     * @param {number} amount - amount to increase by
-     * @param {callback} cb - callback
-     * @return {undefined}
-     */
-    decrby(key, amount, cb) {
-        return this._client.decrby(key, amount, cb);
-    }
-
-    /**
-     * get value stored at key
-     * @param {string} key - key holding the value
-     * @param {callback} cb - callback
-     * @return {undefined}
-     */
-    get(key, cb) {
-        return this._client.get(key, cb);
-    }
-
     /**
      * increment value of a key by a given amount and set a ttl
      * @param {string} key - key holding the value
@@ -116,14 +62,6 @@ class RedisClient {
     clear(cb) {
         return this._client.flushdb(cb);
     }
-
-    disconnect(cb) {
-        return this._client.quit(cb);
-    }
-
-    listClients(cb) {
-        return this._client.client('list', cb);
-    }
 }
 
 module.exports = RedisClient;
@@ -41,11 +41,11 @@ class StatsClient {
     /**
      * build redis key to get total number of occurrences on the server
      * @param {string} name - key name identifier
-     * @param {Date} date - Date instance
+     * @param {object} d - Date instance
      * @return {string} key - key for redis
      */
-    buildKey(name, date) {
-        return `${name}:${this._normalizeTimestamp(date)}`;
+    _buildKey(name, d) {
+        return `${name}:${this._normalizeTimestamp(d)}`;
     }
 
     /**
@@ -85,35 +85,11 @@ class StatsClient {
             amount = (typeof incr === 'number') ? incr : 1;
         }
 
-        const key = this.buildKey(`${id}:requests`, new Date());
+        const key = this._buildKey(`${id}:requests`, new Date());
 
         return this._redis.incrbyEx(key, amount, this._expiry, callback);
     }
 
-    /**
-     * Increment the given key by the given value.
-     * @param {String} key - The Redis key to increment
-     * @param {Number} incr - The value to increment by
-     * @param {function} [cb] - callback
-     * @return {undefined}
-     */
-    incrementKey(key, incr, cb) {
-        const callback = cb || this._noop;
-        return this._redis.incrby(key, incr, callback);
-    }
-
-    /**
-     * Decrement the given key by the given value.
-     * @param {String} key - The Redis key to decrement
-     * @param {Number} decr - The value to decrement by
-     * @param {function} [cb] - callback
-     * @return {undefined}
-     */
-    decrementKey(key, decr, cb) {
-        const callback = cb || this._noop;
-        return this._redis.decrby(key, decr, callback);
-    }
-
     /**
      * report/record a request that ended up being a 500 on the server
      * @param {string} id - service identifier
@@ -125,54 +101,10 @@ class StatsClient {
             return undefined;
         }
         const callback = cb || this._noop;
-        const key = this.buildKey(`${id}:500s`, new Date());
+        const key = this._buildKey(`${id}:500s`, new Date());
         return this._redis.incrEx(key, this._expiry, callback);
     }
 
-    /**
-     * wrapper on `getStats` that handles a list of keys
-     * @param {object} log - Werelogs request logger
-     * @param {array} ids - service identifiers
-     * @param {callback} cb - callback to call with the err/result
-     * @return {undefined}
-     */
-    getAllStats(log, ids, cb) {
-        if (!this._redis) {
-            return cb(null, {});
-        }
-
-        const statsRes = {
-            'requests': 0,
-            '500s': 0,
-            'sampleDuration': this._expiry,
-        };
-        let requests = 0;
-        let errors = 0;
-
-        // for now set concurrency to default of 10
-        return async.eachLimit(ids, 10, (id, done) => {
-            this.getStats(log, id, (err, res) => {
-                if (err) {
-                    return done(err);
-                }
-                requests += res.requests;
-                errors += res['500s'];
-                return done();
-            });
-        }, error => {
-            if (error) {
-                log.error('error getting stats', {
-                    error,
-                    method: 'StatsClient.getAllStats',
-                });
-                return cb(null, statsRes);
-            }
-            statsRes.requests = requests;
-            statsRes['500s'] = errors;
-            return cb(null, statsRes);
-        });
-    }
-
     /**
      * get stats for the last x seconds, x being the sampling duration
      * @param {object} log - Werelogs request logger
@@ -189,8 +121,8 @@ class StatsClient {
         const reqsKeys = [];
         const req500sKeys = [];
         for (let i = 0; i < totalKeys; i++) {
-            reqsKeys.push(['get', this.buildKey(`${id}:requests`, d)]);
-            req500sKeys.push(['get', this.buildKey(`${id}:500s`, d)]);
+            reqsKeys.push(['get', this._buildKey(`${id}:requests`, d)]);
+            req500sKeys.push(['get', this._buildKey(`${id}:500s`, d)]);
             this._setPrevInterval(d);
         }
         return async.parallel([
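The _buildKey helper renamed above composes the per-interval Redis keys that reportNewRequest and report500 increment with a TTL. A small sketch of the resulting key shape follows; the service identifier and the 300-second interval are chosen only for illustration.

const INTERVAL = 300; // seconds: one counter bucket per 5 minutes (assumed)

function normalizeTimestamp(d, intervalSecs) {
    // Same idea as StatsClient._normalizeTimestamp: round the time down to
    // the start of its interval so every request in that window shares a key.
    const secs = Math.floor(d.getTime() / 1000);
    return (secs - (secs % intervalSecs)) * 1000;
}

function buildKey(name, d) {
    return `${name}:${normalizeTimestamp(d, INTERVAL)}`;
}

// e.g. "s3:requests:1546300800000", incremented with a TTL so that old
// intervals age out of Redis on their own.
console.log(buildKey('s3:requests', new Date()));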
@ -1,148 +0,0 @@
|
||||||
const async = require('async');
|
|
||||||
|
|
||||||
const StatsClient = require('./StatsClient');
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @class StatsModel
|
|
||||||
*
|
|
||||||
* @classdesc Extend and overwrite how timestamps are normalized by minutes
|
|
||||||
* rather than by seconds
|
|
||||||
*/
|
|
||||||
class StatsModel extends StatsClient {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Utility method to convert 2d array rows to columns, and vice versa
|
|
||||||
* See also: https://docs.ruby-lang.org/en/2.0.0/Array.html#method-i-zip
|
|
||||||
* @param {array} arrays - 2d array of integers
|
|
||||||
* @return {array} converted array
|
|
||||||
*/
|
|
||||||
_zip(arrays) {
|
|
||||||
if (arrays.length > 0 && arrays.every(a => Array.isArray(a))) {
|
|
||||||
return arrays[0].map((_, i) => arrays.map(a => a[i]));
|
|
||||||
}
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* normalize to the nearest interval
|
|
||||||
* @param {object} d - Date instance
|
|
||||||
* @return {number} timestamp - normalized to the nearest interval
|
|
||||||
*/
|
|
||||||
_normalizeTimestamp(d) {
|
|
||||||
const m = d.getMinutes();
|
|
||||||
return d.setMinutes(m - m % (Math.floor(this._interval / 60)), 0, 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* override the method to get the count as an array of integers separated
|
|
||||||
* by each interval
|
|
||||||
* typical input looks like [[null, '1'], [null, '2'], [null, null]...]
|
|
||||||
* @param {array} arr - each index contains the result of each batch command
|
|
||||||
* where index 0 signifies the error and index 1 contains the result
|
|
||||||
* @return {array} array of integers, ordered from most recent interval to
|
|
||||||
* oldest interval with length of (expiry / interval)
|
|
||||||
*/
|
|
||||||
_getCount(arr) {
|
|
||||||
const size = Math.floor(this._expiry / this._interval);
|
|
||||||
const array = arr.reduce((store, i) => {
|
|
||||||
let num = parseInt(i[1], 10);
|
|
||||||
num = Number.isNaN(num) ? 0 : num;
|
|
||||||
store.push(num);
|
|
||||||
return store;
|
|
||||||
}, []);
|
|
||||||
|
|
||||||
if (array.length < size) {
|
|
||||||
array.push(...Array(size - array.length).fill(0));
|
|
||||||
}
|
|
||||||
return array;
|
|
||||||
}
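For instance, with an expiry of 30 seconds and a 10-second interval (so `size` is 3), a typical batch result converts as follows (illustrative values):

```js
const batchRes = [[null, '4'], [null, '2'], [null, null]];
// index 1 of each entry is the value; missing intervals become 0
const counts = batchRes.map(r => Number.parseInt(r[1], 10) || 0);
// counts === [4, 2, 0]; shorter results are padded with zeroes up to `size`
```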
|
|
||||||
|
|
||||||
/**
|
|
||||||
* wrapper on `getStats` that handles a list of keys
|
|
||||||
* override the method to reduce the returned 2d array from `_getCount`
|
|
||||||
* @param {object} log - Werelogs request logger
|
|
||||||
* @param {array} ids - service identifiers
|
|
||||||
* @param {callback} cb - callback to call with the err/result
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
getAllStats(log, ids, cb) {
|
|
||||||
if (!this._redis) {
|
|
||||||
return cb(null, {});
|
|
||||||
}
|
|
||||||
|
|
||||||
const size = Math.floor(this._expiry / this._interval);
|
|
||||||
const statsRes = {
|
|
||||||
'requests': Array(size).fill(0),
|
|
||||||
'500s': Array(size).fill(0),
|
|
||||||
'sampleDuration': this._expiry,
|
|
||||||
};
|
|
||||||
const requests = [];
|
|
||||||
const errors = [];
|
|
||||||
|
|
||||||
if (ids.length === 0) {
|
|
||||||
return cb(null, statsRes);
|
|
||||||
}
|
|
||||||
|
|
||||||
// for now set concurrency to default of 10
|
|
||||||
return async.eachLimit(ids, 10, (id, done) => {
|
|
||||||
this.getStats(log, id, (err, res) => {
|
|
||||||
if (err) {
|
|
||||||
return done(err);
|
|
||||||
}
|
|
||||||
requests.push(res.requests);
|
|
||||||
errors.push(res['500s']);
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
}, error => {
|
|
||||||
if (error) {
|
|
||||||
log.error('error getting stats', {
|
|
||||||
error,
|
|
||||||
method: 'StatsModel.getAllStats',
|
|
||||||
});
|
|
||||||
return cb(null, statsRes);
|
|
||||||
}
|
|
||||||
|
|
||||||
statsRes.requests = this._zip(requests).map(arr =>
|
|
||||||
arr.reduce((acc, i) => acc + i), 0);
|
|
||||||
statsRes['500s'] = this._zip(errors).map(arr =>
|
|
||||||
arr.reduce((acc, i) => acc + i), 0);
|
|
||||||
|
|
||||||
return cb(null, statsRes);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Handles getting a list of global keys.
|
|
||||||
* @param {array} ids - Service identifiers
|
|
||||||
* @param {object} log - Werelogs request logger
|
|
||||||
* @param {function} cb - Callback
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
getAllGlobalStats(ids, log, cb) {
|
|
||||||
const reqsKeys = ids.map(key => (['get', key]));
|
|
||||||
return this._redis.batch(reqsKeys, (err, res) => {
|
|
||||||
const statsRes = { requests: 0 };
|
|
||||||
if (err) {
|
|
||||||
log.error('error getting metrics', {
|
|
||||||
error: err,
|
|
||||||
method: 'StatsClient.getAllGlobalStats',
|
|
||||||
});
|
|
||||||
return cb(null, statsRes);
|
|
||||||
}
|
|
||||||
statsRes.requests = res.reduce((sum, curr) => {
|
|
||||||
const [cmdErr, val] = curr;
|
|
||||||
if (cmdErr) {
|
|
||||||
// Log any individual request errors from the batch request.
|
|
||||||
log.error('error getting metrics', {
|
|
||||||
error: cmdErr,
|
|
||||||
method: 'StatsClient.getAllGlobalStats',
|
|
||||||
});
|
|
||||||
}
|
|
||||||
return sum + (Number.parseInt(val, 10) || 0);
|
|
||||||
}, 0);
|
|
||||||
return cb(null, statsRes);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = StatsModel;
|
|
|
@@ -1,13 +1,10 @@
 const assert = require('assert');
-const uuid = require('uuid/v4');

 const { WebsiteConfiguration } = require('./WebsiteConfiguration');
 const ReplicationConfiguration = require('./ReplicationConfiguration');
 const LifecycleConfiguration = require('./LifecycleConfiguration');

 // WHEN UPDATING THIS NUMBER, UPDATE MODELVERSION.MD CHANGELOG
-// MODELVERSION.MD can be found in S3 repo: lib/metadata/ModelVersion.md
-const modelVersion = 9;
+const modelVersion = 6;

 class BucketInfo {
     /**
@@ -50,17 +47,12 @@ class BucketInfo {
      * @param {string[]} [cors[].exposeHeaders] - headers expose to applications
      * @param {object} [replicationConfiguration] - replication configuration
      * @param {object} [lifecycleConfiguration] - lifecycle configuration
-     * @param {string} [uid] - unique identifier for the bucket, necessary
-     * @param {string} readLocationConstraint - readLocationConstraint for bucket
-     * addition for use with lifecycle operations
-     * @param {boolean} [isNFS] - whether the bucket is on NFS
      */
     constructor(name, owner, ownerDisplayName, creationDate,
         mdBucketModelVersion, acl, transient, deleted,
         serverSideEncryption, versioningConfiguration,
         locationConstraint, websiteConfiguration, cors,
-        replicationConfiguration, lifecycleConfiguration, uid,
-        readLocationConstraint, isNFS) {
+        replicationConfiguration, lifecycleConfiguration) {
         assert.strictEqual(typeof name, 'string');
         assert.strictEqual(typeof owner, 'string');
         assert.strictEqual(typeof ownerDisplayName, 'string');
@@ -98,9 +90,6 @@ class BucketInfo {
         if (locationConstraint) {
             assert.strictEqual(typeof locationConstraint, 'string');
         }
-        if (readLocationConstraint) {
-            assert.strictEqual(typeof readLocationConstraint, 'string');
-        }
         if (websiteConfiguration) {
             assert(websiteConfiguration instanceof WebsiteConfiguration);
             const { indexDocument, errorDocument, redirectAllRequestsTo,
@@ -123,10 +112,6 @@ class BucketInfo {
         if (lifecycleConfiguration) {
             LifecycleConfiguration.validateConfig(lifecycleConfiguration);
         }
-        if (uid) {
-            assert.strictEqual(typeof uid, 'string');
-            assert.strictEqual(uid.length, 36);
-        }
         const aclInstance = acl || {
             Canned: 'private',
             FULL_CONTROL: [],
@@ -148,13 +133,10 @@ class BucketInfo {
         this._serverSideEncryption = serverSideEncryption || null;
         this._versioningConfiguration = versioningConfiguration || null;
         this._locationConstraint = locationConstraint || null;
-        this._readLocationConstraint = readLocationConstraint || null;
         this._websiteConfiguration = websiteConfiguration || null;
         this._replicationConfiguration = replicationConfiguration || null;
         this._cors = cors || null;
         this._lifecycleConfiguration = lifecycleConfiguration || null;
-        this._uid = uid || uuid();
-        this._isNFS = isNFS || null;
         return this;
     }
     /**
@@ -174,13 +156,10 @@ class BucketInfo {
             serverSideEncryption: this._serverSideEncryption,
             versioningConfiguration: this._versioningConfiguration,
             locationConstraint: this._locationConstraint,
-            readLocationConstraint: this._readLocationConstraint,
             websiteConfiguration: undefined,
             cors: this._cors,
             replicationConfiguration: this._replicationConfiguration,
             lifecycleConfiguration: this._lifecycleConfiguration,
-            uid: this._uid,
-            isNFS: this._isNFS,
         };
         if (this._websiteConfiguration) {
             bucketInfos.websiteConfiguration =
@@ -201,8 +180,7 @@ class BucketInfo {
             obj.creationDate, obj.mdBucketModelVersion, obj.acl,
             obj.transient, obj.deleted, obj.serverSideEncryption,
             obj.versioningConfiguration, obj.locationConstraint, websiteConfig,
-            obj.cors, obj.replicationConfiguration, obj.lifecycleConfiguration,
-            obj.uid, obj.readLocationConstraint, obj.isNFS);
+            obj.cors, obj.replicationConfiguration, obj.lifecycleConfiguration);
     }

     /**
@@ -225,8 +203,7 @@ class BucketInfo {
             data._transient, data._deleted, data._serverSideEncryption,
             data._versioningConfiguration, data._locationConstraint,
             data._websiteConfiguration, data._cors,
-            data._replicationConfiguration, data._lifecycleConfiguration,
-            data._uid, data._readLocationConstraint, data._isNFS);
+            data._replicationConfiguration, data._lifecycleConfiguration);
     }

     /**
@@ -472,17 +449,6 @@ class BucketInfo {
         return this._locationConstraint;
     }
-
-    /**
-     * Get read location constraint.
-     * @return {string} - bucket read location constraint
-     */
-    getReadLocationConstraint() {
-        if (this._readLocationConstraint) {
-            return this._readLocationConstraint;
-        }
-        return this._locationConstraint;
-    }

     /**
      * Set Bucket model version
      *
@@ -555,29 +521,6 @@ class BucketInfo {
         return this._versioningConfiguration &&
             this._versioningConfiguration.Status === 'Enabled';
     }
-    /**
-     * Get unique id of bucket.
-     * @return {string} - unique id
-     */
-    getUid() {
-        return this._uid;
-    }
-    /**
-     * Check if the bucket is an NFS bucket.
-     * @return {boolean} - Wether the bucket is NFS or not
-     */
-    isNFS() {
-        return this._isNFS;
-    }
-    /**
-     * Set whether the bucket is an NFS bucket.
-     * @param {boolean} isNFS - Wether the bucket is NFS or not
-     * @return {BucketInfo} - bucket info instance
-     */
-    setIsNFS(isNFS) {
-        this._isNFS = isNFS;
-        return this;
-    }
 }

 module.exports = BucketInfo;
@@ -269,7 +269,7 @@ class LifecycleConfiguration {
             return filterObj;
         }
         if (filter.Tag) {
-            const tagObj = this._parseTags(filter.Tag);
+            const tagObj = this._parseTags(filter.Tag[0]);
             if (tagObj.error) {
                 filterObj.error = tagObj.error;
                 return filterObj;
@@ -287,7 +287,7 @@ class LifecycleConfiguration {
         if (andF.Prefix && andF.Prefix.length >= 1) {
             filterObj.rulePrefix = andF.Prefix.pop();
         }
-        const tagObj = this._parseTags(andF.Tag);
+        const tagObj = this._parseTags(andF.Tag[0]);
         if (tagObj.error) {
             filterObj.error = tagObj.error;
             return filterObj;
@@ -320,28 +320,31 @@ class LifecycleConfiguration {
         // reset _tagKeys to empty because keys cannot overlap within a rule,
         // but different rules can have the same tag keys
         this._tagKeys = [];
-        for (let i = 0; i < tags.length; i++) {
-            if (!tags[i].Key || !tags[i].Value) {
-                tagObj.error =
-                    errors.MissingRequiredParameter.customizeDescription(
-                        'Tag XML does not contain both Key and Value');
-                break;
-            }
-
-            if (tags[i].Key[0].length < 1 || tags[i].Key[0].length > 128) {
+        if (!tags.Key || !tags.Value) {
+            tagObj.error = errors.MissingRequiredParameter.customizeDescription(
+                'Tag XML does not contain both Key and Value');
+            return tagObj;
+        }
+        if (tags.Key.length !== tags.Value.length) {
+            tagObj.error = errors.MalformedXML.customizeDescription(
+                'Tag XML should contain same number of Keys and Values');
+            return tagObj;
+        }
+        for (let i = 0; i < tags.Key.length; i++) {
+            if (tags.Key[i].length < 1 || tags.Key[i].length > 128) {
                 tagObj.error = errors.InvalidRequest.customizeDescription(
                     'Tag Key must be a length between 1 and 128 char');
                 break;
             }
-            if (this._tagKeys.includes(tags[i].Key[0])) {
+            if (this._tagKeys.includes(tags.Key[i])) {
                 tagObj.error = errors.InvalidRequest.customizeDescription(
                     'Tag Keys must be unique');
                 break;
             }
-            this._tagKeys.push(tags[i].Key[0]);
+            this._tagKeys.push(tags.Key[i]);
             const tag = {
-                key: tags[i].Key[0],
-                val: tags[i].Value[0],
+                key: tags.Key[i],
+                val: tags.Value[i],
             };
             tagObj.tags.push(tag);
         }
@@ -674,12 +677,13 @@ class LifecycleConfiguration {
         const Prefix = rulePrefix ? `<Prefix>${rulePrefix}</Prefix>` : '';
         let tagXML = '';
         if (tags) {
-            tagXML = tags.map(t => {
+            const keysVals = tags.map(t => {
                 const { key, val } = t;
-                const Tag = `<Tag><Key>${key}</Key>` +
-                    `<Value>${val}</Value></Tag>`;
+                const Tag = `<Key>${key}</Key>` +
+                    `<Value>${val}</Value>`;
                 return Tag;
             }).join('');
+            tagXML = `<Tag>${keysVals}</Tag>`;
         }
         let Filter;
         if (rulePrefix && !tags) {
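The last hunk above changes how rule tags are serialized back to XML: the newer side emits one `<Tag>` element per key/value pair, while the older side wraps every key and value inside a single `<Tag>`. A small sketch of both shapes, with two illustrative tags:

```js
const tags = [{ key: 'color', val: 'blue' }, { key: 'size', val: 'xl' }];

// Newer side: one <Tag> element per pair
const perPair = tags.map(t =>
    `<Tag><Key>${t.key}</Key><Value>${t.val}</Value></Tag>`).join('');
// '<Tag><Key>color</Key><Value>blue</Value></Tag>' +
// '<Tag><Key>size</Key><Value>xl</Value></Tag>'

// Older side: a single <Tag> wrapping every key/value
const single = `<Tag>${tags.map(t =>
    `<Key>${t.key}</Key><Value>${t.val}</Value>`).join('')}</Tag>`;
// '<Tag><Key>color</Key><Value>blue</Value><Key>size</Key><Value>xl</Value></Tag>'
```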
@ -120,7 +120,6 @@ class ObjectMD {
|
||||||
role: '',
|
role: '',
|
||||||
storageType: '',
|
storageType: '',
|
||||||
dataStoreVersionId: '',
|
dataStoreVersionId: '',
|
||||||
isNFS: null,
|
|
||||||
},
|
},
|
||||||
'dataStoreName': '',
|
'dataStoreName': '',
|
||||||
};
|
};
|
||||||
|
@ -676,11 +675,8 @@ class ObjectMD {
|
||||||
* @return {string} The encoded object versionId
|
* @return {string} The encoded object versionId
|
||||||
*/
|
*/
|
||||||
getEncodedVersionId() {
|
getEncodedVersionId() {
|
||||||
if (this.getVersionId()) {
|
|
||||||
return VersionIDUtils.encode(this.getVersionId());
|
return VersionIDUtils.encode(this.getVersionId());
|
||||||
}
|
}
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Set tags
|
* Set tags
|
||||||
|
@ -710,7 +706,7 @@ class ObjectMD {
|
||||||
*/
|
*/
|
||||||
setReplicationInfo(replicationInfo) {
|
setReplicationInfo(replicationInfo) {
|
||||||
const { status, backends, content, destination, storageClass, role,
|
const { status, backends, content, destination, storageClass, role,
|
||||||
storageType, dataStoreVersionId, isNFS } = replicationInfo;
|
storageType, dataStoreVersionId } = replicationInfo;
|
||||||
this._data.replicationInfo = {
|
this._data.replicationInfo = {
|
||||||
status,
|
status,
|
||||||
backends,
|
backends,
|
||||||
|
@ -720,7 +716,6 @@ class ObjectMD {
|
||||||
role,
|
role,
|
||||||
storageType: storageType || '',
|
storageType: storageType || '',
|
||||||
dataStoreVersionId: dataStoreVersionId || '',
|
dataStoreVersionId: dataStoreVersionId || '',
|
||||||
isNFS: isNFS || null,
|
|
||||||
};
|
};
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
@ -739,24 +734,6 @@ class ObjectMD {
|
||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
|
||||||
* Set whether the replication is occurring from an NFS bucket.
|
|
||||||
* @param {Boolean} isNFS - Whether replication from an NFS bucket
|
|
||||||
* @return {ObjectMD} itself
|
|
||||||
*/
|
|
||||||
setReplicationIsNFS(isNFS) {
|
|
||||||
this._data.replicationInfo.isNFS = isNFS;
|
|
||||||
return this;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Get whether the replication is occurring from an NFS bucket.
|
|
||||||
* @return {Boolean} Whether replication from an NFS bucket
|
|
||||||
*/
|
|
||||||
getReplicationIsNFS() {
|
|
||||||
return this._data.replicationInfo.isNFS;
|
|
||||||
}
|
|
||||||
|
|
||||||
setReplicationSiteStatus(site, status) {
|
setReplicationSiteStatus(site, status) {
|
||||||
const backend = this._data.replicationInfo.backends
|
const backend = this._data.replicationInfo.backends
|
||||||
.find(o => o.site === site);
|
.find(o => o.site === site);
|
||||||
|
@ -798,16 +775,6 @@ class ObjectMD {
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
||||||
setReplicationBackends(backends) {
|
|
||||||
this._data.replicationInfo.backends = backends;
|
|
||||||
return this;
|
|
||||||
}
|
|
||||||
|
|
||||||
setReplicationStorageClass(storageClass) {
|
|
||||||
this._data.replicationInfo.storageClass = storageClass;
|
|
||||||
return this;
|
|
||||||
}
|
|
||||||
|
|
||||||
getReplicationDataStoreVersionId() {
|
getReplicationDataStoreVersionId() {
|
||||||
return this._data.replicationInfo.dataStoreVersionId;
|
return this._data.replicationInfo.dataStoreVersionId;
|
||||||
}
|
}
|
||||||
|
|
|
@@ -59,7 +59,6 @@ class ReplicationConfiguration {
         this._rules = null;
         this._prevStorageClass = null;
         this._hasScalityDestination = null;
-        this._preferredReadLocation = null;
     }

     /**
@@ -86,18 +85,6 @@ class ReplicationConfiguration {
         return this._rules;
     }

-    /**
-     * The preferred read location
-     * @return {string|null} - The preferred read location if defined,
-     * otherwise null
-     *
-     * FIXME ideally we should be able to specify one preferred read
-     * location for each rule
-     */
-    getPreferredReadLocation() {
-        return this._preferredReadLocation;
-    }
-
     /**
      * Get the replication configuration
      * @return {object} - The replication configuration
@@ -107,7 +94,6 @@ class ReplicationConfiguration {
             role: this.getRole(),
             destination: this.getDestination(),
             rules: this.getRules(),
-            preferredReadLocation: this.getPreferredReadLocation(),
         };
     }

@@ -306,14 +292,6 @@ class ReplicationConfiguration {
             return undefined;
         }
         const storageClasses = destination.StorageClass[0].split(',');
-        const prefReadIndex = storageClasses.findIndex(storageClass =>
-            storageClass.endsWith(':preferred_read'));
-        if (prefReadIndex !== -1) {
-            const prefRead = storageClasses[prefReadIndex].split(':')[0];
-            // remove :preferred_read tag from storage class name
-            storageClasses[prefReadIndex] = prefRead;
-            this._preferredReadLocation = prefRead;
-        }
         const isValidStorageClass = storageClasses.every(storageClass => {
             if (validStorageClasses.includes(storageClass)) {
                 this._hasScalityDestination =
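The removed block above is what parsed a `:preferred_read` suffix out of the comma-separated `<StorageClass>` value. A self-contained sketch of that behaviour, with illustrative site names:

```js
const storageClasses = 'us-west-1,us-east-2:preferred_read'.split(',');
const prefReadIndex = storageClasses.findIndex(sc =>
    sc.endsWith(':preferred_read'));
let preferredReadLocation = null;
if (prefReadIndex !== -1) {
    const prefRead = storageClasses[prefReadIndex].split(':')[0];
    storageClasses[prefReadIndex] = prefRead; // strip the suffix
    preferredReadLocation = prefRead;
}
// storageClasses === ['us-west-1', 'us-east-2']
// preferredReadLocation === 'us-east-2'
```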
@ -1,97 +0,0 @@
|
||||||
const httpServer = require('../http/server');
|
|
||||||
const werelogs = require('werelogs');
|
|
||||||
const errors = require('../../errors');
|
|
||||||
|
|
||||||
function sendError(res, log, error, optMessage) {
|
|
||||||
res.writeHead(error.code);
|
|
||||||
let message;
|
|
||||||
if (optMessage) {
|
|
||||||
message = optMessage;
|
|
||||||
} else {
|
|
||||||
message = error.description || '';
|
|
||||||
}
|
|
||||||
log.debug('sending back error response', { httpCode: error.code,
|
|
||||||
errorType: error.message,
|
|
||||||
error: message });
|
|
||||||
res.end(`${JSON.stringify({ errorType: error.message,
|
|
||||||
errorMessage: message })}\n`);
|
|
||||||
}
|
|
||||||
|
|
||||||
function sendSuccess(res, log, msg) {
|
|
||||||
res.writeHead(200);
|
|
||||||
log.debug('replying with success');
|
|
||||||
const message = msg || 'OK';
|
|
||||||
res.end(message);
|
|
||||||
}
|
|
||||||
|
|
||||||
function constructEndpoints(ns, path) {
|
|
||||||
return `/${ns}/${path}`;
|
|
||||||
}
|
|
||||||
|
|
||||||
function checkStub(log) { // eslint-disable-line
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
class HealthProbeServer extends httpServer {
|
|
||||||
constructor(params) {
|
|
||||||
const logging = new werelogs.Logger('HealthProbeServer');
|
|
||||||
super(params.port, logging);
|
|
||||||
this.logging = logging;
|
|
||||||
this.setBindAddress(params.bindAddress || 'localhost');
|
|
||||||
this._namespace = params.namespace || '_/health';
|
|
||||||
const livenessURI = constructEndpoints(this._namespace,
|
|
||||||
params.livenessURI || 'liveness');
|
|
||||||
const readinessURI = constructEndpoints(this._namespace,
|
|
||||||
params.readinessURI || 'readiness');
|
|
||||||
// hooking our request processing function by calling the
|
|
||||||
// parent's method for that
|
|
||||||
this.onRequest(this._onRequest);
|
|
||||||
this._reqHandlers = {};
|
|
||||||
this._reqHandlers[livenessURI] = this._onLiveness.bind(this);
|
|
||||||
this._reqHandlers[readinessURI] = this._onReadiness.bind(this);
|
|
||||||
this._livenessCheck = params.livenessCheck || checkStub;
|
|
||||||
this._readinessCheck = params.readinessCheck || checkStub;
|
|
||||||
}
|
|
||||||
|
|
||||||
onLiveCheck(f) {
|
|
||||||
this._livenessCheck = f;
|
|
||||||
}
|
|
||||||
|
|
||||||
onReadyCheck(f) {
|
|
||||||
this._readinessCheck = f;
|
|
||||||
}
|
|
||||||
|
|
||||||
_onRequest(req, res) {
|
|
||||||
const log = this.logging.newRequestLogger();
|
|
||||||
log.debug('request received', { method: req.method,
|
|
||||||
url: req.url });
|
|
||||||
if (req.method !== 'GET') {
|
|
||||||
sendError(res, log, errors.MethodNotAllowed);
|
|
||||||
}
|
|
||||||
if (req.url.startsWith(`/${this._namespace}`) &&
|
|
||||||
req.url in this._reqHandlers) {
|
|
||||||
this._reqHandlers[req.url](req, res, log);
|
|
||||||
} else {
|
|
||||||
sendError(res, log, errors.InvalidURI);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_onLiveness(req, res, log) {
|
|
||||||
if (this._livenessCheck(log)) {
|
|
||||||
sendSuccess(res, log);
|
|
||||||
} else {
|
|
||||||
sendError(res, log, errors.ServiceUnavailable);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_onReadiness(req, res, log) {
|
|
||||||
if (this._readinessCheck(log)) {
|
|
||||||
sendSuccess(res, log);
|
|
||||||
} else {
|
|
||||||
sendError(res, log, errors.ServiceUnavailable);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = HealthProbeServer;
|
|
|
@@ -214,7 +214,7 @@ class RESTServer extends httpServer {
         if (req.url.startsWith(`${constants.dataFileURL}?`)) {
             const queryParam = url.parse(req.url).query;
             if (queryParam === 'diskUsage') {
-                return this.dataStore.getDiskUsage((err, result) => {
+                this.dataStore.getDiskUsage((err, result) => {
                     if (err) {
                         return sendError(res, log, err);
                     }
@@ -68,31 +68,6 @@ function _checkUnmodifiedSince(ifUnmodifiedSinceTime, lastModified) {
     return res;
 }

-/**
- * checks 'if-modified-since' and 'if-unmodified-since' headers if included in
- * request against last-modified date of object
- * @param {object} headers - headers from request object
- * @param {string} lastModified - last modified date of object
- * @return {object} contains modifiedSince and unmodifiedSince res objects
- */
-function checkDateModifiedHeaders(headers, lastModified) {
-    let lastModifiedDate = new Date(lastModified);
-    lastModifiedDate.setMilliseconds(0);
-    lastModifiedDate = lastModifiedDate.getTime();
-
-    const ifModifiedSinceHeader = headers['if-modified-since'] ||
-        headers['x-amz-copy-source-if-modified-since'];
-    const ifUnmodifiedSinceHeader = headers['if-unmodified-since'] ||
-        headers['x-amz-copy-source-if-unmodified-since'];
-
-    const modifiedSinceRes = _checkModifiedSince(ifModifiedSinceHeader,
-        lastModifiedDate);
-    const unmodifiedSinceRes = _checkUnmodifiedSince(ifUnmodifiedSinceHeader,
-        lastModifiedDate);
-
-    return { modifiedSinceRes, unmodifiedSinceRes };
-}
-
 /**
  * validateConditionalHeaders - validates 'if-modified-since',
  * 'if-unmodified-since', 'if-match' or 'if-none-match' headers if included in
@@ -104,14 +79,23 @@ function checkDateModifiedHeaders(headers, lastModified) {
  * empty object if no error
  */
 function validateConditionalHeaders(headers, lastModified, contentMD5) {
+    let lastModifiedDate = new Date(lastModified);
+    lastModifiedDate.setMilliseconds(0);
+    lastModifiedDate = lastModifiedDate.getTime();
     const ifMatchHeader = headers['if-match'] ||
         headers['x-amz-copy-source-if-match'];
     const ifNoneMatchHeader = headers['if-none-match'] ||
         headers['x-amz-copy-source-if-none-match'];
+    const ifModifiedSinceHeader = headers['if-modified-since'] ||
+        headers['x-amz-copy-source-if-modified-since'];
+    const ifUnmodifiedSinceHeader = headers['if-unmodified-since'] ||
+        headers['x-amz-copy-source-if-unmodified-since'];
     const etagMatchRes = _checkEtagMatch(ifMatchHeader, contentMD5);
     const etagNoneMatchRes = _checkEtagNoneMatch(ifNoneMatchHeader, contentMD5);
-    const { modifiedSinceRes, unmodifiedSinceRes } =
-        checkDateModifiedHeaders(headers, lastModified);
+    const modifiedSinceRes = _checkModifiedSince(ifModifiedSinceHeader,
+        lastModifiedDate);
+    const unmodifiedSinceRes = _checkUnmodifiedSince(ifUnmodifiedSinceHeader,
+        lastModifiedDate);
     // If-Unmodified-Since condition evaluates to false and If-Match
     // is not present, then return the error. Otherwise, If-Unmodified-Since is
     // silent when If-Match match, and when If-Match does not match, it's the
@@ -136,6 +120,5 @@ module.exports = {
     _checkEtagNoneMatch,
     _checkModifiedSince,
     _checkUnmodifiedSince,
-    checkDateModifiedHeaders,
     validateConditionalHeaders,
 };
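`checkDateModifiedHeaders` (kept on the left-hand side, inlined back into `validateConditionalHeaders` on the right) evaluates the date preconditions against an object's last-modified time. A hedged usage sketch, with illustrative header values:

```js
// Hypothetical call; the header names are the ones read by the function above.
const headers = {
    'if-modified-since': 'Mon, 01 Jan 2018 00:00:00 GMT',
    'if-unmodified-since': 'Tue, 02 Jan 2018 00:00:00 GMT',
};
const lastModified = 'Mon, 01 Jan 2018 12:00:00 GMT';
const { modifiedSinceRes, unmodifiedSinceRes } =
    checkDateModifiedHeaders(headers, lastModified);
// Each result carries the outcome of one check (for example an `error`
// field when the precondition fails), which validateConditionalHeaders
// then combines with the ETag checks.
```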
@ -10,8 +10,6 @@ const routeOPTIONS = require('./routes/routeOPTIONS');
|
||||||
const routesUtils = require('./routesUtils');
|
const routesUtils = require('./routesUtils');
|
||||||
const routeWebsite = require('./routes/routeWebsite');
|
const routeWebsite = require('./routes/routeWebsite');
|
||||||
|
|
||||||
const { objectKeyByteLimit } = require('../constants');
|
|
||||||
|
|
||||||
const routeMap = {
|
const routeMap = {
|
||||||
GET: routeGET,
|
GET: routeGET,
|
||||||
PUT: routePUT,
|
PUT: routePUT,
|
||||||
|
@ -54,14 +52,8 @@ function checkBucketAndKey(bucketName, objectKey, method, reqQuery,
|
||||||
blacklistedPrefixes.object);
|
blacklistedPrefixes.object);
|
||||||
if (!result.isValid) {
|
if (!result.isValid) {
|
||||||
log.debug('invalid object key', { objectKey });
|
log.debug('invalid object key', { objectKey });
|
||||||
if (result.invalidPrefix) {
|
return errors.InvalidArgument.customizeDescription('Object key ' +
|
||||||
return errors.InvalidArgument.customizeDescription('Invalid ' +
|
`must not start with "${result.invalidPrefix}".`);
|
||||||
'prefix - object key cannot start with ' +
|
|
||||||
`"${result.invalidPrefix}".`);
|
|
||||||
}
|
|
||||||
return errors.KeyTooLong.customizeDescription('Object key is too ' +
|
|
||||||
'long. Maximum number of bytes allowed in keys is ' +
|
|
||||||
`${objectKeyByteLimit}.`);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if ((reqQuery.partNumber || reqQuery.uploadId)
|
if ((reqQuery.partNumber || reqQuery.uploadId)
|
||||||
|
@ -172,8 +164,7 @@ function routes(req, res, params, logger) {
|
||||||
logger.newRequestLoggerFromSerializedUids(reqUids) :
|
logger.newRequestLoggerFromSerializedUids(reqUids) :
|
||||||
logger.newRequestLogger());
|
logger.newRequestLogger());
|
||||||
|
|
||||||
if (!req.url.startsWith('/_/healthcheck') &&
|
if (!req.url.startsWith('/_/healthcheck')) {
|
||||||
!req.url.startsWith('/_/report')) {
|
|
||||||
log.info('received request', clientInfo);
|
log.info('received request', clientInfo);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -4,8 +4,6 @@ const errors = require('../errors');
|
||||||
const constants = require('../constants');
|
const constants = require('../constants');
|
||||||
const { eachSeries } = require('async');
|
const { eachSeries } = require('async');
|
||||||
|
|
||||||
const { objectKeyByteLimit } = require('../constants');
|
|
||||||
|
|
||||||
const responseErr = new Error();
|
const responseErr = new Error();
|
||||||
responseErr.code = 'ResponseError';
|
responseErr.code = 'ResponseError';
|
||||||
responseErr.message = 'response closed by client request before all data sent';
|
responseErr.message = 'response closed by client request before all data sent';
|
||||||
|
@ -286,19 +284,13 @@ function retrieveData(locations, retrieveDataFn, response, log) {
|
||||||
response.destroy();
|
response.destroy();
|
||||||
responseDestroyed = true;
|
responseDestroyed = true;
|
||||||
};
|
};
|
||||||
|
|
||||||
const _destroyReadable = readable => {
|
|
||||||
// s3-data sends Readable stream only which does not implement destroy
|
|
||||||
if (readable && readable.destroy) {
|
|
||||||
readable.destroy();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// the S3-client might close the connection while we are processing it
|
// the S3-client might close the connection while we are processing it
|
||||||
response.once('close', () => {
|
response.once('close', () => {
|
||||||
log.debug('received close event before response end');
|
log.debug('received close event before response end');
|
||||||
responseDestroyed = true;
|
responseDestroyed = true;
|
||||||
_destroyReadable(currentStream);
|
if (currentStream) {
|
||||||
|
currentStream.destroy();
|
||||||
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
return eachSeries(locations,
|
return eachSeries(locations,
|
||||||
|
@ -319,7 +311,7 @@ function retrieveData(locations, retrieveDataFn, response, log) {
|
||||||
if (responseDestroyed || response.isclosed) {
|
if (responseDestroyed || response.isclosed) {
|
||||||
log.debug(
|
log.debug(
|
||||||
'response destroyed before readable could stream');
|
'response destroyed before readable could stream');
|
||||||
_destroyReadable(readable);
|
readable.destroy();
|
||||||
return next(responseErr);
|
return next(responseErr);
|
||||||
}
|
}
|
||||||
// readable stream successfully consumed
|
// readable stream successfully consumed
|
||||||
|
@ -876,9 +868,6 @@ const routesUtils = {
|
||||||
if (invalidPrefix) {
|
if (invalidPrefix) {
|
||||||
return { isValid: false, invalidPrefix };
|
return { isValid: false, invalidPrefix };
|
||||||
}
|
}
|
||||||
if (Buffer.byteLength(objectKey, 'utf8') > objectKeyByteLimit) {
|
|
||||||
return { isValid: false };
|
|
||||||
}
|
|
||||||
return { isValid: true };
|
return { isValid: true };
|
||||||
},
|
},
|
||||||
|
|
||||||
|
|
|
@ -5,7 +5,6 @@ const crypto = require('crypto');
|
||||||
const async = require('async');
|
const async = require('async');
|
||||||
const diskusage = require('diskusage');
|
const diskusage = require('diskusage');
|
||||||
const werelogs = require('werelogs');
|
const werelogs = require('werelogs');
|
||||||
const posixFadvise = require('fcntl');
|
|
||||||
|
|
||||||
const errors = require('../../../errors');
|
const errors = require('../../../errors');
|
||||||
const stringHash = require('../../../stringHash');
|
const stringHash = require('../../../stringHash');
|
||||||
|
@ -130,7 +129,6 @@ class DataFileStore {
|
||||||
log.debug('starting to write data', { method: 'put', key, filePath });
|
log.debug('starting to write data', { method: 'put', key, filePath });
|
||||||
dataStream.pause();
|
dataStream.pause();
|
||||||
fs.open(filePath, 'wx', (err, fd) => {
|
fs.open(filePath, 'wx', (err, fd) => {
|
||||||
let ret = 0;
|
|
||||||
if (err) {
|
if (err) {
|
||||||
log.error('error opening filePath',
|
log.error('error opening filePath',
|
||||||
{ method: 'put', key, filePath, error: err });
|
{ method: 'put', key, filePath, error: err });
|
||||||
|
@ -155,19 +153,6 @@ class DataFileStore {
|
||||||
return ok();
|
return ok();
|
||||||
}
|
}
|
||||||
fs.fsync(fd, err => {
|
fs.fsync(fd, err => {
|
||||||
/*
|
|
||||||
* Disabling the caching of stored files is
|
|
||||||
* temporary fix for
|
|
||||||
* https://github.com/kubernetes/kubernetes/issues/43916
|
|
||||||
* that causes cache memory to be accounted as RSS memory
|
|
||||||
* for the pod and can potentially cause the pod
|
|
||||||
* to be killed under memory pressure:
|
|
||||||
*/
|
|
||||||
ret = posixFadvise(fd, 0, size, 4);
|
|
||||||
if (ret !== 0) {
|
|
||||||
log.warning(
|
|
||||||
`error fadv_dontneed ${filePath} returned ${ret}`);
|
|
||||||
}
|
|
||||||
fs.close(fd);
|
fs.close(fd);
|
||||||
if (err) {
|
if (err) {
|
||||||
log.error('fsync error',
|
log.error('fsync error',
|
||||||
|
|
|
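The removed block above drops the kernel page cache for a freshly written file right after `fsync`, via `posixFadvise(fd, 0, size, 4)`, the advice value 4 being POSIX_FADV_DONTNEED on Linux. A condensed sketch of that sequence (error handling trimmed, `size` assumed to be the number of bytes written):

```js
const fs = require('fs');
const posixFadvise = require('fcntl'); // same binding used in the code above

function flushAndDropCache(fd, size, log) {
    fs.fsync(fd, err => {
        // Advise the kernel to evict the pages we just wrote
        // (4 === POSIX_FADV_DONTNEED), so cache is not accounted as pod RSS.
        const ret = posixFadvise(fd, 0, size, 4);
        if (ret !== 0) {
            log.warning(`error fadv_dontneed returned ${ret}`);
        }
        fs.close(fd, () => {});
        if (err) {
            log.error('fsync error', { error: err });
        }
    });
}
```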
@@ -87,7 +87,6 @@ class MetadataWrapper {
                 database: params.mongodb.database,
                 replicationGroupId: params.replicationGroupId,
                 path: params.mongodb.path,
-                config: params.config,
                 logger,
             });
             this.implName = 'mongoclient';
@@ -110,7 +109,7 @@ class MetadataWrapper {
         if (this.client.setup) {
             return this.client.setup(done);
         }
-        return process.nextTick(done);
+        return process.nextTick(() => done);
     }

     createBucket(bucketName, bucketMD, log, cb) {
@ -1,421 +0,0 @@
|
||||||
const cluster = require('cluster');
|
|
||||||
const async = require('async');
|
|
||||||
|
|
||||||
const errors = require('../../../errors');
|
|
||||||
|
|
||||||
const BucketInfo = require('../../../models/BucketInfo');
|
|
||||||
|
|
||||||
const list = require('../../../algos/list/exportAlgos');
|
|
||||||
|
|
||||||
const MetadataFileClient = require('./MetadataFileClient');
|
|
||||||
const versionSep =
|
|
||||||
require('../../../versioning/constants')
|
|
||||||
.VersioningConstants.VersionId.Separator;
|
|
||||||
|
|
||||||
const METASTORE = '__metastore';
|
|
||||||
|
|
||||||
const itemScanRefreshDelay = 1000 * 30 * 60; // 30 minutes
|
|
||||||
|
|
||||||
class BucketFileInterface {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @constructor
|
|
||||||
* @param {object} [params] - constructor params
|
|
||||||
* @param {boolean} [params.noDbOpen=false] - true to skip DB open
|
|
||||||
* @param {object} logger - logger
|
|
||||||
* (for unit tests only)
|
|
||||||
*/
|
|
||||||
constructor(params, logger) {
|
|
||||||
this.logger = logger;
|
|
||||||
const { host, port } = params.metadataClient;
|
|
||||||
this.constants = params.constants;
|
|
||||||
this.mdClient = new MetadataFileClient({ host, port });
|
|
||||||
if (params && params.noDbOpen) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
this.lastItemScanTime = null;
|
|
||||||
this.lastItemScanResult = null;
|
|
||||||
}
|
|
||||||
|
|
||||||
setup(done) {
|
|
||||||
return this.mdClient.openDB((err, value) => {
|
|
||||||
if (err) {
|
|
||||||
throw err;
|
|
||||||
}
|
|
||||||
// the metastore sublevel is used to store bucket attributes
|
|
||||||
this.mdDB = value;
|
|
||||||
this.metastore = this.mdDB.openSub(METASTORE);
|
|
||||||
if (cluster.isMaster) {
|
|
||||||
this.setupMetadataServer(done);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
setupMetadataServer(done) {
|
|
||||||
/* Since the bucket creation API is expecting the
|
|
||||||
usersBucket to have attributes, we pre-create the
|
|
||||||
usersBucket attributes here */
|
|
||||||
this.mdClient.logger.debug('setting up metadata server');
|
|
||||||
const usersBucketAttr = new BucketInfo(this.constants.usersBucket,
|
|
||||||
'admin', 'admin', new Date().toJSON(),
|
|
||||||
BucketInfo.currentModelVersion());
|
|
||||||
return this.metastore.put(
|
|
||||||
this.constants.usersBucket,
|
|
||||||
usersBucketAttr.serialize(), {}, err => {
|
|
||||||
if (err) {
|
|
||||||
this.logger.fatal('error writing usersBucket ' +
|
|
||||||
'attributes to metadata',
|
|
||||||
{ error: err });
|
|
||||||
throw (errors.InternalError);
|
|
||||||
}
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Load DB if exists
|
|
||||||
* @param {String} bucketName - name of bucket
|
|
||||||
* @param {Object} log - logger
|
|
||||||
* @param {function} cb - callback(err, db, attr)
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
loadDBIfExists(bucketName, log, cb) {
|
|
||||||
this.getBucketAttributes(bucketName, log, (err, attr) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
try {
|
|
||||||
const db = this.mdDB.openSub(bucketName);
|
|
||||||
return cb(null, db, attr);
|
|
||||||
} catch (err) {
|
|
||||||
return cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
|
|
||||||
createBucket(bucketName, bucketMD, log, cb) {
|
|
||||||
this.getBucketAttributes(bucketName, log, err => {
|
|
||||||
if (err && err !== errors.NoSuchBucket) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
if (err === undefined) {
|
|
||||||
return cb(errors.BucketAlreadyExists);
|
|
||||||
}
|
|
||||||
this.lastItemScanTime = null;
|
|
||||||
this.putBucketAttributes(bucketName,
|
|
||||||
bucketMD,
|
|
||||||
log, cb);
|
|
||||||
return undefined;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
getBucketAttributes(bucketName, log, cb) {
|
|
||||||
this.metastore
|
|
||||||
.withRequestLogger(log)
|
|
||||||
.get(bucketName, {}, (err, data) => {
|
|
||||||
if (err) {
|
|
||||||
if (err.ObjNotFound) {
|
|
||||||
return cb(errors.NoSuchBucket);
|
|
||||||
}
|
|
||||||
const logObj = {
|
|
||||||
rawError: err,
|
|
||||||
error: err.message,
|
|
||||||
errorStack: err.stack,
|
|
||||||
};
|
|
||||||
log.error('error getting db attributes', logObj);
|
|
||||||
return cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
return cb(null, BucketInfo.deSerialize(data));
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
|
|
||||||
getBucketAndObject(bucketName, objName, params, log, cb) {
|
|
||||||
this.loadDBIfExists(bucketName, log, (err, db, bucketAttr) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
db.withRequestLogger(log)
|
|
||||||
.get(objName, params, (err, objAttr) => {
|
|
||||||
if (err) {
|
|
||||||
if (err.ObjNotFound) {
|
|
||||||
return cb(null, {
|
|
||||||
bucket: bucketAttr.serialize(),
|
|
||||||
});
|
|
||||||
}
|
|
||||||
const logObj = {
|
|
||||||
rawError: err,
|
|
||||||
error: err.message,
|
|
||||||
errorStack: err.stack,
|
|
||||||
};
|
|
||||||
log.error('error getting object', logObj);
|
|
||||||
return cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
return cb(null, {
|
|
||||||
bucket: bucketAttr.serialize(),
|
|
||||||
obj: objAttr,
|
|
||||||
});
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
|
|
||||||
putBucketAttributes(bucketName, bucketMD, log, cb) {
|
|
||||||
this.metastore
|
|
||||||
.withRequestLogger(log)
|
|
||||||
.put(bucketName, bucketMD.serialize(), {}, err => {
|
|
||||||
if (err) {
|
|
||||||
const logObj = {
|
|
||||||
rawError: err,
|
|
||||||
error: err.message,
|
|
||||||
errorStack: err.stack,
|
|
||||||
};
|
|
||||||
log.error('error putting db attributes', logObj);
|
|
||||||
return cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
return cb();
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
|
|
||||||
deleteBucket(bucketName, log, cb) {
|
|
||||||
this.metastore
|
|
||||||
.withRequestLogger(log)
|
|
||||||
.del(bucketName, {}, err => {
|
|
||||||
if (err) {
|
|
||||||
const logObj = {
|
|
||||||
rawError: err,
|
|
||||||
error: err.message,
|
|
||||||
errorStack: err.stack,
|
|
||||||
};
|
|
||||||
log.error('error deleting bucket',
|
|
||||||
logObj);
|
|
||||||
return cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
this.lastItemScanTime = null;
|
|
||||||
return cb();
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
|
|
||||||
putObject(bucketName, objName, objVal, params, log, cb) {
|
|
||||||
this.loadDBIfExists(bucketName, log, (err, db) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
db.withRequestLogger(log)
|
|
||||||
.put(objName, JSON.stringify(objVal), params, (err, data) => {
|
|
||||||
if (err) {
|
|
||||||
const logObj = {
|
|
||||||
rawError: err,
|
|
||||||
error: err.message,
|
|
||||||
errorStack: err.stack,
|
|
||||||
};
|
|
||||||
log.error('error putting object', logObj);
|
|
||||||
return cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
return cb(err, data);
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
getObject(bucketName, objName, params, log, cb) {
|
|
||||||
this.loadDBIfExists(bucketName, log, (err, db) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
db.withRequestLogger(log).get(objName, params, (err, data) => {
|
|
||||||
if (err) {
|
|
||||||
if (err.ObjNotFound) {
|
|
||||||
return cb(errors.NoSuchKey);
|
|
||||||
}
|
|
||||||
const logObj = {
|
|
||||||
rawError: err,
|
|
||||||
error: err.message,
|
|
||||||
errorStack: err.stack,
|
|
||||||
};
|
|
||||||
log.error('error getting object', logObj);
|
|
||||||
return cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
return cb(null, JSON.parse(data));
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
deleteObject(bucketName, objName, params, log, cb) {
|
|
||||||
this.loadDBIfExists(bucketName, log, (err, db) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
db.withRequestLogger(log).del(objName, params, err => {
|
|
||||||
if (err) {
|
|
||||||
const logObj = {
|
|
||||||
rawError: err,
|
|
||||||
error: err.message,
|
|
||||||
errorStack: err.stack,
|
|
||||||
};
|
|
||||||
log.error('error deleting object', logObj);
|
|
||||||
return cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
return cb();
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* This complex function deals with different extensions of bucket listing:
|
|
||||||
* Delimiter based search or MPU based search.
|
|
||||||
* @param {String} bucketName - The name of the bucket to list
|
|
||||||
* @param {Object} params - The params to search
|
|
||||||
* @param {Object} log - The logger object
|
|
||||||
* @param {function} cb - Callback when done
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
internalListObject(bucketName, params, log, cb) {
|
|
||||||
const extName = params.listingType;
|
|
||||||
const extension = new list[extName](params, log);
|
|
||||||
const requestParams = extension.genMDParams();
|
|
||||||
this.loadDBIfExists(bucketName, log, (err, db) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
let cbDone = false;
|
|
||||||
db.withRequestLogger(log)
|
|
||||||
.createReadStream(requestParams, (err, stream) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
stream
|
|
||||||
.on('data', e => {
|
|
||||||
if (extension.filter(e) < 0) {
|
|
||||||
stream.emit('end');
|
|
||||||
stream.destroy();
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.on('error', err => {
|
|
||||||
if (!cbDone) {
|
|
||||||
cbDone = true;
|
|
||||||
const logObj = {
|
|
||||||
rawError: err,
|
|
||||||
error: err.message,
|
|
||||||
errorStack: err.stack,
|
|
||||||
};
|
|
||||||
log.error('error listing objects', logObj);
|
|
||||||
cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.on('end', () => {
|
|
||||||
if (!cbDone) {
|
|
||||||
cbDone = true;
|
|
||||||
const data = extension.result();
|
|
||||||
cb(null, data);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
listObject(bucketName, params, log, cb) {
|
|
||||||
return this.internalListObject(bucketName, params, log, cb);
|
|
||||||
}
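`internalListObject` above looks up a listing extension by `params.listingType` and streams keys through its `filter()`. A hedged usage sketch against a hypothetical `mdBackend` instance; the parameters beyond `listingType` depend on the chosen algorithm and are assumptions here:

```js
// Hypothetical call; `log` is assumed to be a werelogs request logger.
mdBackend.listObject('my-bucket', {
    listingType: 'DelimiterMaster', // name of an exported listing algorithm
    prefix: 'photos/',              // assumed Delimiter-style parameters
    delimiter: '/',
    maxKeys: 100,
}, log, (err, data) => {
    if (err) {
        return log.error('listing failed', { error: err });
    }
    // `data` is whatever extension.result() produced for that algorithm
    return log.info('listing done', { result: data });
});
```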
|
|
||||||
|
|
||||||
listMultipartUploads(bucketName, params, log, cb) {
|
|
||||||
return this.internalListObject(bucketName, params, log, cb);
|
|
||||||
}
|
|
||||||
|
|
||||||
getUUID(log, cb) {
|
|
||||||
return this.mdDB.getUUID(cb);
|
|
||||||
}
|
|
||||||
|
|
||||||
getDiskUsage(cb) {
|
|
||||||
return this.mdDB.getDiskUsage(cb);
|
|
||||||
}
|
|
||||||
|
|
||||||
countItems(log, cb) {
|
|
||||||
if (this.lastItemScanTime !== null &&
|
|
||||||
(Date.now() - this.lastItemScanTime) <= itemScanRefreshDelay) {
|
|
||||||
return process.nextTick(cb, null, this.lastItemScanResult);
|
|
||||||
}
|
|
||||||
|
|
||||||
const params = {};
|
|
||||||
const extension = new list.Basic(params, log);
|
|
||||||
const requestParams = extension.genMDParams();
|
|
||||||
|
|
||||||
const res = {
|
|
||||||
objects: 0,
|
|
||||||
versions: 0,
|
|
||||||
buckets: 0,
|
|
||||||
bucketList: [],
|
|
||||||
};
|
|
||||||
let cbDone = false;
|
|
||||||
|
|
||||||
this.mdDB.rawListKeys(requestParams, (err, stream) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
stream
|
|
||||||
.on('data', e => {
|
|
||||||
if (!e.includes(METASTORE)) {
|
|
||||||
if (e.includes(this.constants.usersBucket)) {
|
|
||||||
res.buckets++;
|
|
||||||
res.bucketList.push({
|
|
||||||
name: e.split(this.constants.splitter)[1],
|
|
||||||
});
|
|
||||||
} else if (e.includes(versionSep)) {
|
|
||||||
res.versions++;
|
|
||||||
} else if (!e.includes('..recordLogs#s3-recordlog')) {
|
|
||||||
res.objects++;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.on('error', err => {
|
|
||||||
if (!cbDone) {
|
|
||||||
cbDone = true;
|
|
||||||
const logObj = {
|
|
||||||
error: err,
|
|
||||||
errorMessage: err.message,
|
|
||||||
errorStack: err.stack,
|
|
||||||
};
|
|
||||||
log.error('error listing objects', logObj);
|
|
||||||
cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.on('end', () => {
|
|
||||||
if (!cbDone) {
|
|
||||||
cbDone = true;
|
|
||||||
async.eachSeries(res.bucketList, (bucket, cb) => {
|
|
||||||
this.getBucketAttributes(bucket.name, log,
|
|
||||||
(err, bucketInfo) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
/* eslint-disable no-param-reassign */
|
|
||||||
bucket.location =
|
|
||||||
bucketInfo.getLocationConstraint();
|
|
||||||
/* eslint-enable no-param-reassign */
|
|
||||||
return cb();
|
|
||||||
});
|
|
||||||
}, err => {
|
|
||||||
if (!err) {
|
|
||||||
this.lastItemScanTime = Date.now();
|
|
||||||
this.lastItemScanResult = res;
|
|
||||||
}
|
|
||||||
return cb(err, res);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
return undefined;
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = BucketFileInterface;
|
|
|
@@ -58,7 +58,8 @@ class MetadataFileClient {
             logger: this.logger,
             callTimeoutMs: this.callTimeoutMs,
         });
-        return dbClient.connect(() => done(null, dbClient));
+        dbClient.connect(done);
+        return dbClient;
     }

     /**
@ -1,32 +0,0 @@
|
||||||
const ListResult = require('./ListResult');
|
|
||||||
|
|
||||||
class ListMultipartUploadsResult extends ListResult {
|
|
||||||
constructor() {
|
|
||||||
super();
|
|
||||||
this.Uploads = [];
|
|
||||||
this.NextKeyMarker = undefined;
|
|
||||||
this.NextUploadIdMarker = undefined;
|
|
||||||
}
|
|
||||||
|
|
||||||
addUpload(uploadInfo) {
|
|
||||||
this.Uploads.push({
|
|
||||||
key: decodeURIComponent(uploadInfo.key),
|
|
||||||
value: {
|
|
||||||
UploadId: uploadInfo.uploadId,
|
|
||||||
Initiator: {
|
|
||||||
ID: uploadInfo.initiatorID,
|
|
||||||
DisplayName: uploadInfo.initiatorDisplayName,
|
|
||||||
},
|
|
||||||
Owner: {
|
|
||||||
ID: uploadInfo.ownerID,
|
|
||||||
DisplayName: uploadInfo.ownerDisplayName,
|
|
||||||
},
|
|
||||||
StorageClass: uploadInfo.storageClass,
|
|
||||||
Initiated: uploadInfo.initiated,
|
|
||||||
},
|
|
||||||
});
|
|
||||||
this.MaxKeys += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = ListMultipartUploadsResult;
|
|
|
@ -1,27 +0,0 @@
|
||||||
class ListResult {
|
|
||||||
constructor() {
|
|
||||||
this.IsTruncated = false;
|
|
||||||
this.NextMarker = undefined;
|
|
||||||
this.CommonPrefixes = [];
|
|
||||||
/*
|
|
||||||
Note: this.MaxKeys will get incremented as
|
|
||||||
keys are added so that when response is returned,
|
|
||||||
this.MaxKeys will equal total keys in response
|
|
||||||
(with each CommonPrefix counting as 1 key)
|
|
||||||
*/
|
|
||||||
this.MaxKeys = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
addCommonPrefix(prefix) {
|
|
||||||
if (!this.hasCommonPrefix(prefix)) {
|
|
||||||
this.CommonPrefixes.push(prefix);
|
|
||||||
this.MaxKeys += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
hasCommonPrefix(prefix) {
|
|
||||||
return (this.CommonPrefixes.indexOf(prefix) !== -1);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = ListResult;
|
|
|
@ -1,62 +0,0 @@
|
||||||
# bucket_mem design
|
|
||||||
|
|
||||||
## RATIONALE
|
|
||||||
|
|
||||||
The bucket API will be used for managing buckets behind the S3 interface.
|
|
||||||
|
|
||||||
We plan to have only 2 backends using this interface:
|
|
||||||
|
|
||||||
* One production backend
|
|
||||||
* One debug backend purely in memory
|
|
||||||
|
|
||||||
One important remark here is that we don't want an abstraction but a
|
|
||||||
duck-typing style interface (different classes MemoryBucket and Bucket having
|
|
||||||
the same methods putObjectMD(), getObjectMD(), etc).
|
|
||||||
|
|
||||||
Notes about the memory backend: The backend is currently a simple key/value
|
|
||||||
store in memory. The functions actually use nextTick() to emulate the future
|
|
||||||
asynchronous behavior of the production backend.
|
|
||||||
|
|
||||||
## BUCKET API
|
|
||||||
|
|
||||||
The bucket API is a very simple API with 5 functions:
|
|
||||||
|
|
||||||
- putObjectMD(): put metadata for an object in the bucket
|
|
||||||
- getObjectMD(): get metadata from the bucket
|
|
||||||
- deleteObjectMD(): delete metadata for an object from the bucket
|
|
||||||
- deleteBucketMD(): delete a bucket
|
|
||||||
- getBucketListObjects(): perform the complex bucket listing AWS search
|
|
||||||
function with various flavors. This function returns a response in a
|
|
||||||
ListBucketResult object.
|
|
||||||
|
|
||||||
getBucketListObjects(prefix, marker, delimiter, maxKeys, callback) behavior is
|
|
||||||
the following:
|
|
||||||
|
|
||||||
prefix (not required): Limits the response to keys that begin with the
|
|
||||||
specified prefix. You can use prefixes to separate a bucket into different
|
|
||||||
groupings of keys. (You can think of using prefix to make groups in the same
|
|
||||||
way you'd use a folder in a file system.)
|
|
||||||
|
|
||||||
marker (not required): Specifies the key to start with when listing objects in
|
|
||||||
a bucket. Amazon S3 returns object keys in alphabetical order, starting with
|
|
||||||
key after the marker in order.
|
|
||||||
|
|
||||||
delimiter (not required): A delimiter is a character you use to group keys.
|
|
||||||
All keys that contain the same string between the prefix, if specified, and the
|
|
||||||
first occurrence of the delimiter after the prefix are grouped under a single
|
|
||||||
result element, CommonPrefixes. If you don't specify the prefix parameter, then
|
|
||||||
the substring starts at the beginning of the key. The keys that are grouped
|
|
||||||
under CommonPrefixes are not returned elsewhere in the response.
|
|
||||||
|
|
||||||
maxKeys: Sets the maximum number of keys returned in the response body. You can
|
|
||||||
add this to your request if you want to retrieve fewer than the default 1000
|
|
||||||
keys. The response might contain fewer keys but will never contain more. If
|
|
||||||
there are additional keys that satisfy the search criteria but were not
|
|
||||||
returned because maxKeys was exceeded, the response contains an attribute of
|
|
||||||
IsTruncated set to true and a NextMarker. To return the additional keys, call
|
|
||||||
the function again using NextMarker as your marker argument in the function.
|
|
||||||
|
|
||||||
Any key that does not contain the delimiter will be returned individually in
|
|
||||||
Contents rather than in CommonPrefixes.
|
|
||||||
|
|
||||||
If there is an error, the error subfield is returned in the response.
|
|
|
@ -1,34 +0,0 @@
|
||||||
function markerFilterMPU(allMarkers, array) {
|
|
||||||
const { keyMarker, uploadIdMarker } = allMarkers;
|
|
||||||
|
|
||||||
// 1. if the item key matches the keyMarker and an uploadIdMarker exists,
|
|
||||||
// find the first uploadId in the array that is alphabetically after
|
|
||||||
// uploadIdMarker
|
|
||||||
// 2. if the item key does not match the keyMarker, find the first uploadId
|
|
||||||
// in the array that is alphabetically after keyMarker
|
|
||||||
const firstUnfilteredIndex = array.findIndex(
|
|
||||||
item => (uploadIdMarker && item.key === keyMarker ?
|
|
||||||
item.uploadId > uploadIdMarker :
|
|
||||||
item.key > keyMarker));
|
|
||||||
return firstUnfilteredIndex !== -1 ? array.slice(firstUnfilteredIndex) : [];
|
|
||||||
}
|
|
||||||
|
|
||||||
function prefixFilter(prefix, array) {
|
|
||||||
for (let i = 0; i < array.length; i++) {
|
|
||||||
if (array[i].indexOf(prefix) !== 0) {
|
|
||||||
array.splice(i, 1);
|
|
||||||
i--;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return array;
|
|
||||||
}
|
|
||||||
|
|
||||||
function isKeyInContents(responseObject, key) {
|
|
||||||
return responseObject.Contents.some(val => val.key === key);
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
markerFilterMPU,
|
|
||||||
prefixFilter,
|
|
||||||
isKeyInContents,
|
|
||||||
};
|
|
|
@ -1,148 +0,0 @@
|
||||||
const errors = require('../../../errors');
|
|
||||||
|
|
||||||
const { markerFilterMPU, prefixFilter } = require('./bucket_utilities');
|
|
||||||
const ListMultipartUploadsResult = require('./ListMultipartUploadsResult');
|
|
||||||
const { metadata } = require('./metadata');
|
|
||||||
|
|
||||||
const defaultMaxKeys = 1000;
|
|
||||||
function getMultipartUploadListing(bucket, params, callback) {
|
|
||||||
const { delimiter, keyMarker,
|
|
||||||
uploadIdMarker, prefix, queryPrefixLength, splitter } = params;
|
|
||||||
const splitterLen = splitter.length;
|
|
||||||
const maxKeys = params.maxKeys !== undefined ?
|
|
||||||
Number.parseInt(params.maxKeys, 10) : defaultMaxKeys;
|
|
||||||
const response = new ListMultipartUploadsResult();
|
|
||||||
const keyMap = metadata.keyMaps.get(bucket.getName());
|
|
||||||
if (prefix) {
|
|
||||||
response.Prefix = prefix;
|
|
||||||
if (typeof prefix !== 'string') {
|
|
||||||
return callback(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (keyMarker) {
|
|
||||||
response.KeyMarker = keyMarker;
|
|
||||||
if (typeof keyMarker !== 'string') {
|
|
||||||
return callback(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (uploadIdMarker) {
|
|
||||||
response.UploadIdMarker = uploadIdMarker;
|
|
||||||
if (typeof uploadIdMarker !== 'string') {
|
|
||||||
return callback(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (delimiter) {
|
|
||||||
response.Delimiter = delimiter;
|
|
||||||
if (typeof delimiter !== 'string') {
|
|
||||||
return callback(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (maxKeys && typeof maxKeys !== 'number') {
|
|
||||||
return callback(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sort uploads alphatebetically by objectKey and if same objectKey,
|
|
||||||
// then sort in ascending order by time initiated
|
|
||||||
let uploads = [];
|
|
||||||
keyMap.forEach((val, key) => {
|
|
||||||
uploads.push(key);
|
|
||||||
});
|
|
||||||
uploads.sort((a, b) => {
|
|
||||||
const aIndex = a.indexOf(splitter);
|
|
||||||
const bIndex = b.indexOf(splitter);
|
|
||||||
const aObjectKey = a.substring(aIndex + splitterLen);
|
|
||||||
const bObjectKey = b.substring(bIndex + splitterLen);
|
|
||||||
const aInitiated = keyMap.get(a).initiated;
|
|
||||||
const bInitiated = keyMap.get(b).initiated;
|
|
||||||
if (aObjectKey === bObjectKey) {
|
|
||||||
if (Date.parse(aInitiated) >= Date.parse(bInitiated)) {
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
if (Date.parse(aInitiated) < Date.parse(bInitiated)) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return (aObjectKey < bObjectKey) ? -1 : 1;
|
|
||||||
});
|
|
||||||
// Edit the uploads array so it only
|
|
||||||
// contains keys that contain the prefix
|
|
||||||
uploads = prefixFilter(prefix, uploads);
|
|
||||||
uploads = uploads.map(stringKey => {
|
|
||||||
const index = stringKey.indexOf(splitter);
|
|
||||||
const index2 = stringKey.indexOf(splitter, index + splitterLen);
|
|
||||||
const storedMD = keyMap.get(stringKey);
|
|
||||||
return {
|
|
||||||
key: stringKey.substring(index + splitterLen, index2),
|
|
||||||
uploadId: stringKey.substring(index2 + splitterLen),
|
|
||||||
bucket: storedMD.eventualStorageBucket,
|
|
||||||
initiatorID: storedMD.initiator.ID,
|
|
||||||
initiatorDisplayName: storedMD.initiator.DisplayName,
|
|
||||||
ownerID: storedMD['owner-id'],
|
|
||||||
ownerDisplayName: storedMD['owner-display-name'],
|
|
||||||
storageClass: storedMD['x-amz-storage-class'],
|
|
||||||
initiated: storedMD.initiated,
|
|
||||||
};
|
|
||||||
});
|
|
||||||
// If keyMarker specified, edit the uploads array so it
|
|
||||||
// only contains keys that occur alphabetically after the marker.
|
|
||||||
// If there is also an uploadIdMarker specified, filter to eliminate
|
|
||||||
// any uploads that share the keyMarker and have an uploadId before
|
|
||||||
// the uploadIdMarker.
|
|
||||||
if (keyMarker) {
|
|
||||||
const allMarkers = {
|
|
||||||
keyMarker,
|
|
||||||
uploadIdMarker,
|
|
||||||
};
|
|
||||||
uploads = markerFilterMPU(allMarkers, uploads);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iterate through uploads and filter uploads
|
|
||||||
// with keys containing delimiter
|
|
||||||
// into response.CommonPrefixes and filter remaining uploads
|
|
||||||
// into response.Uploads
|
|
||||||
for (let i = 0; i < uploads.length; i++) {
|
|
||||||
const currentUpload = uploads[i];
|
|
||||||
// If hit maxKeys, stop adding keys to response
|
|
||||||
if (response.MaxKeys >= maxKeys) {
|
|
||||||
response.IsTruncated = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
// If a delimiter is specified, find its
|
|
||||||
// index in the current key AFTER THE OCCURRENCE OF THE PREFIX
|
|
||||||
// THAT WAS SENT IN THE QUERY (not the prefix including the splitter
|
|
||||||
// and other elements)
|
|
||||||
let delimiterIndexAfterPrefix = -1;
|
|
||||||
const currentKeyWithoutPrefix =
|
|
||||||
currentUpload.key.slice(queryPrefixLength);
|
|
||||||
let sliceEnd;
|
|
||||||
if (delimiter) {
|
|
||||||
delimiterIndexAfterPrefix = currentKeyWithoutPrefix
|
|
||||||
.indexOf(delimiter);
|
|
||||||
sliceEnd = delimiterIndexAfterPrefix + queryPrefixLength;
|
|
||||||
}
|
|
||||||
// If delimiter occurs in current key, add key to
|
|
||||||
// response.CommonPrefixes.
|
|
||||||
// Otherwise add upload to response.Uploads
|
|
||||||
if (delimiterIndexAfterPrefix > -1) {
|
|
||||||
const keySubstring = currentUpload.key.slice(0, sliceEnd + 1);
|
|
||||||
response.addCommonPrefix(keySubstring);
|
|
||||||
} else {
|
|
||||||
response.NextKeyMarker = currentUpload.key;
|
|
||||||
response.NextUploadIdMarker = currentUpload.uploadId;
|
|
||||||
response.addUpload(currentUpload);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// `response.MaxKeys` should be the value from the original `MaxUploads`
|
|
||||||
// parameter specified by the user (or else the default 1000). Redefine it
|
|
||||||
// here, so it does not equal the value of `uploads.length`.
|
|
||||||
response.MaxKeys = maxKeys;
|
|
||||||
// If `response.MaxKeys` is 0, `response.IsTruncated` should be `false`.
|
|
||||||
response.IsTruncated = maxKeys === 0 ? false : response.IsTruncated;
|
|
||||||
return callback(null, response);
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = getMultipartUploadListing;
|
|
|
@ -1,8 +0,0 @@
|
||||||
const metadata = {
|
|
||||||
buckets: new Map,
|
|
||||||
keyMaps: new Map,
|
|
||||||
};
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
metadata,
|
|
||||||
};
|
|
|
@ -1,318 +0,0 @@
|
||||||
const errors = require('../../../errors');
|
|
||||||
const list = require('../../../algos/list/exportAlgos');
|
|
||||||
const genVID =
|
|
||||||
require('../../../versioning/VersionID').generateVersionId;
|
|
||||||
|
|
||||||
const getMultipartUploadListing = require('./getMultipartUploadListing');
|
|
||||||
const { metadata } = require('./metadata');
|
|
||||||
|
|
||||||
// const genVID = versioning.VersionID.generateVersionId;
|
|
||||||
|
|
||||||
const defaultMaxKeys = 1000;
|
|
||||||
let uidCounter = 0;
|
|
||||||
|
|
||||||
function generateVersionId(replicationGroupId) {
|
|
||||||
return genVID(uidCounter++, replicationGroupId);
|
|
||||||
}
|
|
||||||
|
|
||||||
function formatVersionKey(key, versionId) {
|
|
||||||
return `${key}\0${versionId}`;
|
|
||||||
}
|
|
||||||
|
|
||||||
function inc(str) {
|
|
||||||
return str ? (str.slice(0, str.length - 1) +
|
|
||||||
String.fromCharCode(str.charCodeAt(str.length - 1) + 1)) : str;
|
|
||||||
}
|
|
||||||
|
|
||||||
const metastore = {
|
|
||||||
createBucket: (bucketName, bucketMD, log, cb) => {
|
|
||||||
process.nextTick(() => {
|
|
||||||
metastore.getBucketAttributes(bucketName, log, (err, bucket) => {
|
|
||||||
// TODO Check whether user already owns the bucket,
|
|
||||||
// if so return "BucketAlreadyOwnedByYou"
|
|
||||||
// If not owned by user, return "BucketAlreadyExists"
|
|
||||||
if (bucket) {
|
|
||||||
return cb(errors.BucketAlreadyExists);
|
|
||||||
}
|
|
||||||
metadata.buckets.set(bucketName, bucketMD);
|
|
||||||
metadata.keyMaps.set(bucketName, new Map);
|
|
||||||
return cb();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
},
|
|
||||||
|
|
||||||
putBucketAttributes: (bucketName, bucketMD, log, cb) => {
|
|
||||||
process.nextTick(() => {
|
|
||||||
metastore.getBucketAttributes(bucketName, log, err => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
metadata.buckets.set(bucketName, bucketMD);
|
|
||||||
return cb();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
},
|
|
||||||
|
|
||||||
getBucketAttributes: (bucketName, log, cb) => {
|
|
||||||
process.nextTick(() => {
|
|
||||||
if (!metadata.buckets.has(bucketName)) {
|
|
||||||
return cb(errors.NoSuchBucket);
|
|
||||||
}
|
|
||||||
return cb(null, metadata.buckets.get(bucketName));
|
|
||||||
});
|
|
||||||
},
|
|
||||||
|
|
||||||
deleteBucket: (bucketName, log, cb) => {
|
|
||||||
process.nextTick(() => {
|
|
||||||
metastore.getBucketAttributes(bucketName, log, err => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
if (metadata.keyMaps.has(bucketName)
|
|
||||||
&& metadata.keyMaps.get(bucketName).length > 0) {
|
|
||||||
return cb(errors.BucketNotEmpty);
|
|
||||||
}
|
|
||||||
metadata.buckets.delete(bucketName);
|
|
||||||
metadata.keyMaps.delete(bucketName);
|
|
||||||
return cb(null);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
},
|
|
||||||
|
|
||||||
putObject: (bucketName, objName, objVal, params, log, cb) => {
|
|
||||||
process.nextTick(() => {
|
|
||||||
metastore.getBucketAttributes(bucketName, log, err => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
/*
|
|
||||||
valid combinations of versioning options:
|
|
||||||
- !versioning && !versionId: normal non-versioning put
|
|
||||||
- versioning && !versionId: create a new version
|
|
||||||
- versionId: update (PUT/DELETE) an existing version,
|
|
||||||
and also update master version in case the put
|
|
||||||
version is newer or same version than master.
|
|
||||||
if versionId === '' update master version
|
|
||||||
*/
|
|
||||||
|
|
||||||
if (params && params.versionId) {
|
|
||||||
objVal.versionId = params.versionId; // eslint-disable-line
|
|
||||||
const mst = metadata.keyMaps.get(bucketName).get(objName);
|
|
||||||
if (mst && mst.versionId === params.versionId || !mst) {
|
|
||||||
metadata.keyMaps.get(bucketName).set(objName, objVal);
|
|
||||||
}
|
|
||||||
// eslint-disable-next-line
|
|
||||||
objName = formatVersionKey(objName, params.versionId);
|
|
||||||
metadata.keyMaps.get(bucketName).set(objName, objVal);
|
|
||||||
return cb(null, `{"versionId":"${objVal.versionId}"}`);
|
|
||||||
}
|
|
||||||
if (params && params.versioning) {
|
|
||||||
const versionId = generateVersionId();
|
|
||||||
objVal.versionId = versionId; // eslint-disable-line
|
|
||||||
metadata.keyMaps.get(bucketName).set(objName, objVal);
|
|
||||||
// eslint-disable-next-line
|
|
||||||
objName = formatVersionKey(objName, versionId);
|
|
||||||
metadata.keyMaps.get(bucketName).set(objName, objVal);
|
|
||||||
return cb(null, `{"versionId":"${versionId}"}`);
|
|
||||||
}
|
|
||||||
if (params && params.versionId === '') {
|
|
||||||
const versionId = generateVersionId();
|
|
||||||
objVal.versionId = versionId; // eslint-disable-line
|
|
||||||
metadata.keyMaps.get(bucketName).set(objName, objVal);
|
|
||||||
return cb(null, `{"versionId":"${objVal.versionId}"}`);
|
|
||||||
}
|
|
||||||
metadata.keyMaps.get(bucketName).set(objName, objVal);
|
|
||||||
return cb(null);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
},
|
|
||||||
|
|
||||||
getBucketAndObject: (bucketName, objName, params, log, cb) => {
|
|
||||||
process.nextTick(() => {
|
|
||||||
metastore.getBucketAttributes(bucketName, log, (err, bucket) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err, { bucket });
|
|
||||||
}
|
|
||||||
if (params && params.versionId) {
|
|
||||||
// eslint-disable-next-line
|
|
||||||
objName = formatVersionKey(objName, params.versionId);
|
|
||||||
}
|
|
||||||
if (!metadata.keyMaps.has(bucketName)
|
|
||||||
|| !metadata.keyMaps.get(bucketName).has(objName)) {
|
|
||||||
return cb(null, { bucket: bucket.serialize() });
|
|
||||||
}
|
|
||||||
return cb(null, {
|
|
||||||
bucket: bucket.serialize(),
|
|
||||||
obj: JSON.stringify(
|
|
||||||
metadata.keyMaps.get(bucketName).get(objName)
|
|
||||||
),
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
},
|
|
||||||
|
|
||||||
getObject: (bucketName, objName, params, log, cb) => {
|
|
||||||
process.nextTick(() => {
|
|
||||||
metastore.getBucketAttributes(bucketName, log, err => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
if (params && params.versionId) {
|
|
||||||
// eslint-disable-next-line
|
|
||||||
objName = formatVersionKey(objName, params.versionId);
|
|
||||||
}
|
|
||||||
if (!metadata.keyMaps.has(bucketName)
|
|
||||||
|| !metadata.keyMaps.get(bucketName).has(objName)) {
|
|
||||||
return cb(errors.NoSuchKey);
|
|
||||||
}
|
|
||||||
return cb(null, metadata.keyMaps.get(bucketName).get(objName));
|
|
||||||
});
|
|
||||||
});
|
|
||||||
},
|
|
||||||
|
|
||||||
deleteObject: (bucketName, objName, params, log, cb) => {
|
|
||||||
process.nextTick(() => {
|
|
||||||
metastore.getBucketAttributes(bucketName, log, err => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
if (!metadata.keyMaps.get(bucketName).has(objName)) {
|
|
||||||
return cb(errors.NoSuchKey);
|
|
||||||
}
|
|
||||||
if (params && params.versionId) {
|
|
||||||
const baseKey = inc(formatVersionKey(objName, ''));
|
|
||||||
const vobjName = formatVersionKey(objName,
|
|
||||||
params.versionId);
|
|
||||||
metadata.keyMaps.get(bucketName).delete(vobjName);
|
|
||||||
const mst = metadata.keyMaps.get(bucketName).get(objName);
|
|
||||||
if (mst.versionId === params.versionId) {
|
|
||||||
const keys = [];
|
|
||||||
metadata.keyMaps.get(bucketName).forEach((val, key) => {
|
|
||||||
if (key < baseKey && key > vobjName) {
|
|
||||||
keys.push(key);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
if (keys.length === 0) {
|
|
||||||
metadata.keyMaps.get(bucketName).delete(objName);
|
|
||||||
return cb();
|
|
||||||
}
|
|
||||||
const key = keys.sort()[0];
|
|
||||||
const value = metadata.keyMaps.get(bucketName).get(key);
|
|
||||||
metadata.keyMaps.get(bucketName).set(objName, value);
|
|
||||||
}
|
|
||||||
return cb();
|
|
||||||
}
|
|
||||||
metadata.keyMaps.get(bucketName).delete(objName);
|
|
||||||
return cb();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
},
|
|
||||||
|
|
||||||
_hasDeleteMarker(key, keyMap) {
|
|
||||||
const objectMD = keyMap.get(key);
|
|
||||||
if (objectMD['x-amz-delete-marker'] !== undefined) {
|
|
||||||
return (objectMD['x-amz-delete-marker'] === true);
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
},
|
|
||||||
|
|
||||||
listObject(bucketName, params, log, cb) {
|
|
||||||
process.nextTick(() => {
|
|
||||||
const { prefix, marker, delimiter, maxKeys } = params;
|
|
||||||
if (prefix && typeof prefix !== 'string') {
|
|
||||||
return cb(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (marker && typeof marker !== 'string') {
|
|
||||||
return cb(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (delimiter && typeof delimiter !== 'string') {
|
|
||||||
return cb(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (maxKeys && typeof maxKeys !== 'number') {
|
|
||||||
return cb(errors.InvalidArgument);
|
|
||||||
}
|
|
||||||
|
|
||||||
// If paramMaxKeys is undefined, the default parameter will set it.
|
|
||||||
// However, if it is null, the default parameter will not set it.
|
|
||||||
let numKeys = maxKeys;
|
|
||||||
if (numKeys === null) {
|
|
||||||
numKeys = defaultMaxKeys;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!metadata.keyMaps.has(bucketName)) {
|
|
||||||
return cb(errors.NoSuchBucket);
|
|
||||||
}
|
|
||||||
|
|
||||||
// If marker specified, edit the keys array so it
|
|
||||||
// only contains keys that occur alphabetically after the marker
|
|
||||||
const listingType = params.listingType;
|
|
||||||
const extension = new list[listingType](params, log);
|
|
||||||
const listingParams = extension.genMDParams();
|
|
||||||
|
|
||||||
const keys = [];
|
|
||||||
metadata.keyMaps.get(bucketName).forEach((val, key) => {
|
|
||||||
if (listingParams.gt && listingParams.gt >= key) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
if (listingParams.gte && listingParams.gte > key) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
if (listingParams.lt && key >= listingParams.lt) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
if (listingParams.lte && key > listingParams.lte) {
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
return keys.push(key);
|
|
||||||
});
|
|
||||||
keys.sort();
|
|
||||||
|
|
||||||
// Iterate through keys array and filter keys containing
|
|
||||||
// delimiter into response.CommonPrefixes and filter remaining
|
|
||||||
// keys into response.Contents
|
|
||||||
for (let i = 0; i < keys.length; ++i) {
|
|
||||||
const currentKey = keys[i];
|
|
||||||
// Do not list object with delete markers
|
|
||||||
if (this._hasDeleteMarker(currentKey,
|
|
||||||
metadata.keyMaps.get(bucketName))) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
const objMD = metadata.keyMaps.get(bucketName).get(currentKey);
|
|
||||||
const value = JSON.stringify(objMD);
|
|
||||||
const obj = {
|
|
||||||
key: currentKey,
|
|
||||||
value,
|
|
||||||
};
|
|
||||||
// calling Ext.filter(obj) adds the obj to the Ext result if
|
|
||||||
// not filtered.
|
|
||||||
// Also, Ext.filter returns false when hit max keys.
|
|
||||||
// What a nifty function!
|
|
||||||
if (extension.filter(obj) < 0) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return cb(null, extension.result());
|
|
||||||
});
|
|
||||||
},
|
|
||||||
|
|
||||||
listMultipartUploads(bucketName, listingParams, log, cb) {
|
|
||||||
process.nextTick(() => {
|
|
||||||
metastore.getBucketAttributes(bucketName, log, (err, bucket) => {
|
|
||||||
if (bucket === undefined) {
|
|
||||||
// no on going multipart uploads, return empty listing
|
|
||||||
return cb(null, {
|
|
||||||
IsTruncated: false,
|
|
||||||
NextMarker: undefined,
|
|
||||||
MaxKeys: 0,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
return getMultipartUploadListing(bucket, listingParams, cb);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
module.exports = metastore;
|
|
|
@ -1,271 +0,0 @@
|
||||||
const NEW_OBJ = 0;
|
|
||||||
const NEW_VER = 1;
|
|
||||||
const UPDATE_VER = 2;
|
|
||||||
const UPDATE_MST = 3;
|
|
||||||
const RESTORE = 4;
|
|
||||||
|
|
||||||
const DEL_VER = 0;
|
|
||||||
const DEL_MST = 1;
|
|
||||||
|
|
||||||
const CURR = 'curr';
|
|
||||||
const PREV = 'prev';
|
|
||||||
|
|
||||||
function deepCopyObject(obj) {
|
|
||||||
return JSON.parse(JSON.stringify(obj));
|
|
||||||
}
|
|
||||||
|
|
||||||
class DataCounter {
|
|
||||||
/**
|
|
||||||
* DataCounter - class for keeping track of the ItemCount metrics
|
|
||||||
* @return {DataCounter} DataCounter object
|
|
||||||
*/
|
|
||||||
constructor() {
|
|
||||||
this.objects = 0;
|
|
||||||
this.versions = 0;
|
|
||||||
this.dataManaged = {
|
|
||||||
total: { curr: 0, prev: 0 },
|
|
||||||
byLocation: {},
|
|
||||||
};
|
|
||||||
this.populated = false;
|
|
||||||
this.transientList = {};
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* updateTransientList - update data counter list of transient locations
|
|
||||||
* @param {Object} newLocations - list of locations constraint details
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
updateTransientList(newLocations) {
|
|
||||||
if (newLocations && Object.keys(newLocations).length > 0) {
|
|
||||||
const tempList = {};
|
|
||||||
Object.keys(newLocations).forEach(loc => {
|
|
||||||
tempList[loc] = newLocations[loc].isTransient;
|
|
||||||
});
|
|
||||||
this.transientList = tempList;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* set - set DataCounter values
|
|
||||||
* @param {Object} setVal - object containing values to be used for setting
|
|
||||||
* DataCounter
|
|
||||||
* @param {number} setVal.objects - number of master objects
|
|
||||||
* @param {number} setVal.versions - number of versioned objects
|
|
||||||
* @param {Object} setVal.dataManaged - object containing information about
|
|
||||||
* all the data managed
|
|
||||||
* @param {Object} setVal.total - object containing the total byte count of
|
|
||||||
* data managed
|
|
||||||
* @param {number} setVal.total.curr - the total byte count of master
|
|
||||||
* objects
|
|
||||||
* @param {number} setVal.total.prev - the total byte count of versioned
|
|
||||||
* objects
|
|
||||||
* @param {Object} setVal.byLocaton - object containing the information
|
|
||||||
* about data managed on each location
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
set(setVal) {
|
|
||||||
if (setVal) {
|
|
||||||
this.objects = setVal.objects;
|
|
||||||
this.versions = setVal.versions;
|
|
||||||
this.dataManaged = deepCopyObject(setVal.dataManaged);
|
|
||||||
this.populated = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* results - creates a deep copy of the current DataCounter values
|
|
||||||
* @return {Object} - object containing the current DataCounter values
|
|
||||||
*/
|
|
||||||
results() {
|
|
||||||
const obj = {
|
|
||||||
objects: this.objects,
|
|
||||||
versions: this.versions,
|
|
||||||
dataManaged: this.dataManaged,
|
|
||||||
};
|
|
||||||
return deepCopyObject(obj);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* addObjectFn - performing add operations
|
|
||||||
* @param {ObjectMD} currMD - new master version metadata
|
|
||||||
* @param {ObjectMD} prevMD - old master version metadata
|
|
||||||
* @param {number} type - index of the current type of add operation
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
addObject(currMD, prevMD, type) {
|
|
||||||
if (type !== undefined && type !== null && this.populated) {
|
|
||||||
switch (type) {
|
|
||||||
case NEW_OBJ: // add new object, replace master if needed
|
|
||||||
if (prevMD) {
|
|
||||||
this._delValue(prevMD, CURR);
|
|
||||||
this._addValue(currMD, CURR);
|
|
||||||
} else {
|
|
||||||
++this.objects;
|
|
||||||
this._addValue(currMD, CURR);
|
|
||||||
}
|
|
||||||
break;
|
|
||||||
case NEW_VER: // add new object, archive master
|
|
||||||
++this.versions;
|
|
||||||
this._delValue(prevMD, CURR);
|
|
||||||
this._addValue(prevMD, PREV);
|
|
||||||
this._addValue(currMD, CURR);
|
|
||||||
break;
|
|
||||||
case UPDATE_VER: // update archived object, replication info
|
|
||||||
this._updateObject(currMD, prevMD, PREV);
|
|
||||||
break;
|
|
||||||
case UPDATE_MST: // update master object, replication info
|
|
||||||
this._updateObject(currMD, prevMD, CURR);
|
|
||||||
break;
|
|
||||||
case RESTORE:
|
|
||||||
--this.versions;
|
|
||||||
this._delValue(currMD, PREV);
|
|
||||||
++this.objects;
|
|
||||||
this._addValue(currMD, CURR);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
// should throw error, noop
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* delObjectFn - performing del operations
|
|
||||||
* @param {ObjectMD} currMD - object metadata
|
|
||||||
* @param {number} type - index of the current type of delete operation
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
delObject(currMD, type) {
|
|
||||||
if (type !== undefined && type !== null && this.populated) {
|
|
||||||
switch (type) {
|
|
||||||
case DEL_VER:
|
|
||||||
--this.versions;
|
|
||||||
this._delValue(currMD, PREV);
|
|
||||||
break;
|
|
||||||
case DEL_MST:
|
|
||||||
--this.objects;
|
|
||||||
this._delValue(currMD, CURR);
|
|
||||||
break;
|
|
||||||
default:
|
|
||||||
// should throw error, noop
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_addLocation(site, size, type) {
|
|
||||||
this.dataManaged.total[type] += size;
|
|
||||||
if (!this.dataManaged.byLocation[site]) {
|
|
||||||
this.dataManaged.byLocation[site] = {
|
|
||||||
curr: 0,
|
|
||||||
prev: 0,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
this.dataManaged.byLocation[site][type] += size;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* _addValue - helper function for handling put object updates
|
|
||||||
* @param {ObjectMD} objMD - object metadata
|
|
||||||
* @param {string} type - string with value either 'curr' or 'prev'
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
_addValue(objMD, type) {
|
|
||||||
if (objMD) {
|
|
||||||
const { replicationInfo, 'content-length': size } = objMD;
|
|
||||||
const { backends } = replicationInfo || {};
|
|
||||||
this._addLocation(objMD.dataStoreName, size, type);
|
|
||||||
if (backends && Array.isArray(backends)) {
|
|
||||||
backends.forEach(loc => {
|
|
||||||
const { site, status } = loc;
|
|
||||||
if (status === 'COMPLETED') {
|
|
||||||
this._addLocation(site, size, type);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* _updateObject - helper function for handling updates from replication
|
|
||||||
* info changes
|
|
||||||
* @param {ObjectMD} currMD - new object metadata
|
|
||||||
* @param {ObjectMD} prevMD - old object metadata
|
|
||||||
* @param {string} type - string with value either 'curr' or 'prev'
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
_updateObject(currMD, prevMD, type) {
|
|
||||||
const transientList = Object.assign({}, this.transientList);
|
|
||||||
if (currMD && prevMD) {
|
|
||||||
// check for changes in replication
|
|
||||||
const { replicationInfo: currLocs,
|
|
||||||
'content-length': size, dataStoreName } = currMD;
|
|
||||||
const { replicationInfo: prevLocs } = prevMD;
|
|
||||||
const { backends: prevBackends } = prevLocs || {};
|
|
||||||
const { backends: currBackends } = currLocs || {};
|
|
||||||
const oldLocs = {};
|
|
||||||
if (prevBackends && Array.isArray(prevBackends)) {
|
|
||||||
prevBackends.forEach(loc => {
|
|
||||||
const { site, status } = loc;
|
|
||||||
oldLocs[site] = status;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
if (currBackends && Array.isArray(currBackends)) {
|
|
||||||
currBackends.forEach(loc => {
|
|
||||||
const { site, status } = loc;
|
|
||||||
if (site in oldLocs && status === 'COMPLETED' &&
|
|
||||||
oldLocs[site] !== status) {
|
|
||||||
this._addLocation(site, size, type);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
if (currLocs.status === 'COMPLETED' &&
|
|
||||||
transientList[dataStoreName]) {
|
|
||||||
this._delLocation(dataStoreName, size, type);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
_delLocation(site, size, type) {
|
|
||||||
if (this.dataManaged.byLocation[site]) {
|
|
||||||
this.dataManaged.total[type] -= size;
|
|
||||||
this.dataManaged.total[type] =
|
|
||||||
Math.max(0, this.dataManaged.total[type]);
|
|
||||||
this.dataManaged.byLocation[site][type] -= size;
|
|
||||||
this.dataManaged.byLocation[site][type] =
|
|
||||||
Math.max(0, this.dataManaged.byLocation[site][type]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/**
|
|
||||||
* _delValue - helper function for handling delete object operations
|
|
||||||
* @param {ObjectMD} objMD - object metadata
|
|
||||||
* @param {string} type - string with value either 'curr' or 'prev'
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
_delValue(objMD, type) {
|
|
||||||
if (objMD) {
|
|
||||||
const { replicationInfo, 'content-length': size } = objMD;
|
|
||||||
const { backends } = replicationInfo || {};
|
|
||||||
this._delLocation(objMD.dataStoreName, size, type);
|
|
||||||
if (backends && Array.isArray(backends)) {
|
|
||||||
backends.forEach(loc => {
|
|
||||||
const { site, status } = loc;
|
|
||||||
if (status === 'COMPLETED') {
|
|
||||||
this._delLocation(site, size, type);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
NEW_OBJ,
|
|
||||||
NEW_VER,
|
|
||||||
UPDATE_VER,
|
|
||||||
UPDATE_MST,
|
|
||||||
RESTORE,
|
|
||||||
DEL_VER,
|
|
||||||
DEL_MST,
|
|
||||||
DataCounter,
|
|
||||||
};
|
|
|
@ -1,89 +0,0 @@
|
||||||
const stream = require('stream');
|
|
||||||
|
|
||||||
class ListRecordStream extends stream.Transform {
|
|
||||||
constructor(logger, lastEndID) {
|
|
||||||
super({ objectMode: true });
|
|
||||||
this._logger = logger;
|
|
||||||
this._lastEndID = lastEndID;
|
|
||||||
this._lastTs = 0;
|
|
||||||
this._lastUniqID = null;
|
|
||||||
// this._unpublishedListing is true once we pass the oplog
|
|
||||||
// that has the start seq timestamp and uniqID 'h'
|
|
||||||
this._unpublishedListing = false;
|
|
||||||
}
|
|
||||||
|
|
||||||
_transform(itemObj, encoding, callback) {
|
|
||||||
// always update to most recent uniqID
|
|
||||||
this._lastUniqID = itemObj.h.toString();
|
|
||||||
|
|
||||||
if (this._lastTs === null || itemObj.ts.toNumber() > this._lastTs) {
|
|
||||||
this._lastTs = itemObj.ts.toNumber();
|
|
||||||
}
|
|
||||||
|
|
||||||
// only push to stream unpublished objects
|
|
||||||
if (!this._unpublishedListing) {
|
|
||||||
// When an oplog with a unique ID that is stored in the
|
|
||||||
// log offset is found, all oplogs AFTER this is unpublished.
|
|
||||||
if (!this._lastEndID || this._lastEndID === itemObj.h.toString()) {
|
|
||||||
this._unpublishedListing = true;
|
|
||||||
}
|
|
||||||
return callback();
|
|
||||||
}
|
|
||||||
|
|
||||||
const dbName = itemObj.ns.split('.');
|
|
||||||
let entry;
|
|
||||||
if (itemObj.op === 'i' &&
|
|
||||||
itemObj.o && itemObj.o._id) {
|
|
||||||
entry = {
|
|
||||||
type: 'put',
|
|
||||||
key: itemObj.o._id,
|
|
||||||
// value is given as-is for inserts
|
|
||||||
value: JSON.stringify(itemObj.o.value),
|
|
||||||
};
|
|
||||||
} else if (itemObj.op === 'u' &&
|
|
||||||
itemObj.o && itemObj.o2 && itemObj.o2._id) {
|
|
||||||
entry = {
|
|
||||||
type: 'put', // updates overwrite the whole metadata,
|
|
||||||
// so they are considered as puts
|
|
||||||
key: itemObj.o2._id,
|
|
||||||
// updated value may be either stored directly in 'o'
|
|
||||||
// attribute or in '$set' attribute (supposedly when
|
|
||||||
// the object pre-exists it will be in '$set')
|
|
||||||
value: JSON.stringify(
|
|
||||||
(itemObj.o.$set ? itemObj.o.$set : itemObj.o).value),
|
|
||||||
};
|
|
||||||
} else if (itemObj.op === 'd' &&
|
|
||||||
itemObj.o && itemObj.o._id) {
|
|
||||||
entry = {
|
|
||||||
type: 'delete',
|
|
||||||
key: itemObj.o._id,
|
|
||||||
// deletion yields no value
|
|
||||||
};
|
|
||||||
} else {
|
|
||||||
// skip other entry types as we don't need them for now
|
|
||||||
// ('c', ...?)
|
|
||||||
return callback();
|
|
||||||
}
|
|
||||||
const streamObject = {
|
|
||||||
timestamp: new Date((itemObj.ts ?
|
|
||||||
itemObj.ts.toNumber() * 1000 : 0)),
|
|
||||||
db: dbName[1],
|
|
||||||
entries: [entry],
|
|
||||||
};
|
|
||||||
return callback(null, streamObject);
|
|
||||||
}
|
|
||||||
|
|
||||||
_flush(callback) {
|
|
||||||
this.emit('info', {
|
|
||||||
// store both the timestamp and unique oplog id in an
|
|
||||||
// opaque JSON string returned to the reader
|
|
||||||
end: JSON.stringify({
|
|
||||||
ts: this._lastTs,
|
|
||||||
uniqID: this._lastUniqID,
|
|
||||||
}),
|
|
||||||
});
|
|
||||||
callback();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = ListRecordStream;
|
|
|
@ -1,103 +0,0 @@
|
||||||
'use strict'; // eslint-disable-line
|
|
||||||
|
|
||||||
const MongoClient = require('mongodb').MongoClient;
|
|
||||||
const ListRecordStream = require('./ListRecordStream');
|
|
||||||
const { Timestamp } = require('bson');
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @class
|
|
||||||
* @classdesc Class to consume mongo oplog
|
|
||||||
*/
|
|
||||||
class LogConsumer {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* @constructor
|
|
||||||
*
|
|
||||||
* @param {object} mongoConfig - object with the mongo configuration
|
|
||||||
* @param {string} logger - logger
|
|
||||||
*/
|
|
||||||
constructor(mongoConfig, logger) {
|
|
||||||
const { replicaSetHosts, database } = mongoConfig;
|
|
||||||
// 'local' is the database where MongoDB has oplogs.rs capped collection
|
|
||||||
this.database = 'local';
|
|
||||||
this.mongoUrl = `mongodb://${replicaSetHosts}/local`;
|
|
||||||
this.logger = logger;
|
|
||||||
this.metadataDatabase = database;
|
|
||||||
this.oplogNsRegExp = new RegExp(`^${database}\\.`);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Connect to MongoClient using Mongo node module to access database and
|
|
||||||
* database oplogs (operation logs)
|
|
||||||
*
|
|
||||||
* @param {function} done - callback function, called with an error object
|
|
||||||
* or null and an object as 2nd parameter
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
connectMongo(done) {
|
|
||||||
MongoClient.connect(this.mongoUrl, { replicaSet: 'rs0' },
|
|
||||||
(err, client) => {
|
|
||||||
if (err) {
|
|
||||||
this.logger.error('Unable to connect to MongoDB',
|
|
||||||
{ error: err });
|
|
||||||
return done(err);
|
|
||||||
}
|
|
||||||
this.logger.info('connected to mongodb');
|
|
||||||
this.client = client;
|
|
||||||
this.db = client.db(this.database, {
|
|
||||||
ignoreUndefined: true,
|
|
||||||
});
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
/**
|
|
||||||
* Read a series of log records from mongo
|
|
||||||
*
|
|
||||||
* @param {Object} [params] - params object
|
|
||||||
* @param {String} [params.startSeq] - fetch starting from this
|
|
||||||
* opaque offset returned previously by mongo ListRecordStream
|
|
||||||
* in an 'info' event
|
|
||||||
* @param {Number} [params.limit] - maximum number of log records
|
|
||||||
* to return
|
|
||||||
* @param {function} cb - callback function, called with an error
|
|
||||||
* object or null and an object as 2nd parameter
|
|
||||||
*
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
readRecords(params, cb) {
|
|
||||||
const limit = params.limit || 10000;
|
|
||||||
let startSeq = { ts: 0 };
|
|
||||||
if (params.startSeq) {
|
|
||||||
try {
|
|
||||||
// parse the opaque JSON string passed through from a
|
|
||||||
// previous 'info' event
|
|
||||||
startSeq = JSON.parse(params.startSeq);
|
|
||||||
} catch (err) {
|
|
||||||
this.logger.error('malformed startSeq', {
|
|
||||||
startSeq: params.startSeq,
|
|
||||||
});
|
|
||||||
// start over if malformed
|
|
||||||
}
|
|
||||||
}
|
|
||||||
const recordStream = new ListRecordStream(this.logger, startSeq.uniqID);
|
|
||||||
|
|
||||||
this.coll = this.db.collection('oplog.rs');
|
|
||||||
return this.coll.find({
|
|
||||||
ns: this.oplogNsRegExp,
|
|
||||||
ts: { $gte: Timestamp.fromNumber(startSeq.ts) },
|
|
||||||
}, {
|
|
||||||
limit,
|
|
||||||
tailable: false,
|
|
||||||
awaitData: false,
|
|
||||||
noCursorTimeout: true,
|
|
||||||
oplogReplay: true,
|
|
||||||
numberOfRetries: Number.MAX_VALUE,
|
|
||||||
}, (err, res) => {
|
|
||||||
res.stream().pipe(recordStream);
|
|
||||||
recordStream.removeAllListeners('error');
|
|
||||||
return cb(null, { log: recordStream });
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = LogConsumer;
|
|
File diff suppressed because it is too large
Load Diff
|
@ -1,173 +0,0 @@
|
||||||
# Mongoclient
|
|
||||||
|
|
||||||
We introduce a new metadata backend called *mongoclient* for
|
|
||||||
[MongoDB](https://www.mongodb.com). This backend takes advantage of
|
|
||||||
MongoDB being a document store to store the metadata (bucket and
|
|
||||||
object attributes) as JSON objects.
|
|
||||||
|
|
||||||
## Overall Design
|
|
||||||
|
|
||||||
The mongoclient backend strictly follows the metadata interface that
|
|
||||||
stores bucket and object attributes, which consists of the methods
|
|
||||||
createBucket(), getBucketAttributes(), getBucketAndObject()
|
|
||||||
(attributes), putBucketAttributes(), deleteBucket(), putObject(),
|
|
||||||
getObject(), deleteObject(), listObject(), listMultipartUploads() and
|
|
||||||
the management methods getUUID(), getDiskUsage() and countItems(). The
|
|
||||||
mongoclient backend also knows how to deal with versioning, it is also
|
|
||||||
compatible with the various listing algorithms implemented in Arsenal.
|
|
||||||
|
|
||||||
FIXME: There should be a document describing the metadata (currently
|
|
||||||
duck-typing) interface.
|
|
||||||
|
|
||||||
### Why Using MongoDB for Storing Bucket and Object Attributes
|
|
||||||
|
|
||||||
We chose MongoDB for various reasons:
|
|
||||||
|
|
||||||
- MongoDB supports replication, especially through the Raft protocol.
|
|
||||||
|
|
||||||
- MongoDB supports a basic replication scheme called 'Replica Set' and
|
|
||||||
more advanced sharding schemes if required.
|
|
||||||
|
|
||||||
- MongoDB is open source and an enterprise standard.
|
|
||||||
|
|
||||||
- MongoDB is a document store (natively supports JSON) and supports a
|
|
||||||
very flexible search interface.
|
|
||||||
|
|
||||||
### Choice of Mongo Client Library
|
|
||||||
|
|
||||||
We chose to use the official MongoDB driver for NodeJS:
|
|
||||||
[https://github.com/mongodb/node-mongodb-native](https://github.com/mongodb/node-mongodb-native)
|
|
||||||
|
|
||||||
### Granularity for Buckets
|
|
||||||
|
|
||||||
We chose to have one collection for one bucket mapping. First because
|
|
||||||
in a simple mode of replication called 'replica set' it works from the
|
|
||||||
get-go, but if one or many buckets grow to big it is possible to use
|
|
||||||
more advanced schemes such as sharding. MongoDB supports a mix of
|
|
||||||
sharded and non-sharded collections.
|
|
||||||
|
|
||||||
### Storing Database Information
|
|
||||||
|
|
||||||
We need a special collection called the *Infostore* (stored under the
|
|
||||||
name __infostore which is impossible to create through the S3 bucket
|
|
||||||
naming scheme) to store specific database properties such as the
|
|
||||||
unique *uuid* for Orbit.
|
|
||||||
|
|
||||||
### Storing Bucket Attributes
|
|
||||||
|
|
||||||
We need to use a special collection called the *Metastore* (stored
|
|
||||||
under the name __metastore which is impossible to create through the
|
|
||||||
S3 bucket naming scheme).
|
|
||||||
|
|
||||||
### Versioning Format
|
|
||||||
|
|
||||||
We chose to keep the same versioning format that we use in some other
|
|
||||||
Scality products in order to facilitate the compatibility between the
|
|
||||||
different products.
|
|
||||||
|
|
||||||
FIXME: Document the versioning internals in the upper layers and
|
|
||||||
document the versioning format
|
|
||||||
|
|
||||||
### Dealing with Concurrency
|
|
||||||
|
|
||||||
We chose not to use transactions (aka
|
|
||||||
[https://docs.mongodb.com/manual/tutorial/perform-two-phase-commits/)
|
|
||||||
because it is a known fact there is an overhead of using them, and we
|
|
||||||
thought there was no real need for them since we could leverage Mongo
|
|
||||||
ordered operations guarantees and atomic writes.
|
|
||||||
|
|
||||||
Example of corner cases:
|
|
||||||
|
|
||||||
#### CreateBucket()
|
|
||||||
|
|
||||||
Since it is not possible to create a collection AND at the same time
|
|
||||||
register the bucket in the Metastore we chose to only update the
|
|
||||||
Metastore. A non-existing collection (NamespaceNotFound error in
|
|
||||||
Mongo) is one possible normal state for an empty bucket.
|
|
||||||
|
|
||||||
#### DeleteBucket()
|
|
||||||
|
|
||||||
In this case the bucket is *locked* by the upper layers (use of a
|
|
||||||
transient delete flag) so we don't have to worry about that and by the
|
|
||||||
fact the bucket is empty neither (which is also checked by the upper
|
|
||||||
layers).
|
|
||||||
|
|
||||||
We first drop() the collection and then we asynchronously delete the
|
|
||||||
bucket name entry from the metastore (the removal from the metastore
|
|
||||||
is atomic which is not absolutely necessary in this case but more
|
|
||||||
robust in term of design).
|
|
||||||
|
|
||||||
If we fail in between we still have an entry in the metastore which is
|
|
||||||
good because we need to manage the delete flag. For the upper layers
|
|
||||||
the operation has not completed until this flag is removed. The upper
|
|
||||||
layers will restart the deleteBucket() which is fine because we manage
|
|
||||||
the case where the collection does not exist.
|
|
||||||
|
|
||||||
#### PutObject() with a Version
|
|
||||||
|
|
||||||
We need to store the versioned object then update the master object
|
|
||||||
(the latest version). For this we use the
|
|
||||||
[BulkWrite](http://mongodb.github.io/node-mongodb-native/3.0/api/Collection.html#bulkWrite)
|
|
||||||
method. This is not a transaction but guarantees that the 2 operations
|
|
||||||
will happen sequentially in the MongoDB oplog. Indeed if the
|
|
||||||
BulkWrite() fails in between we would end up creating an orphan (which
|
|
||||||
is not critical) but if the operation succeeds then we are sure that
|
|
||||||
the master is always pointing to the right object. If there is a
|
|
||||||
concurrency between 2 clients then we are sure that the 2 groups of
|
|
||||||
operations will be clearly decided in the oplog (the last writer will
|
|
||||||
win).
|
|
||||||
|
|
||||||
#### DeleteObject()
|
|
||||||
|
|
||||||
This is probably the most complex case to manage because it involves a
|
|
||||||
lot of different cases:
|
|
||||||
|
|
||||||
##### Deleting an Object when Versioning is not Enabled
|
|
||||||
|
|
||||||
This case is a straightforward atomic delete. Atomicity is not really
|
|
||||||
required because we assume version IDs are random enough but it is
|
|
||||||
more robust to do so.
|
|
||||||
|
|
||||||
##### Deleting an Object when Versioning is Enabled
|
|
||||||
|
|
||||||
This case is more complex since we have to deal with the 2 cases:
|
|
||||||
|
|
||||||
Case 1: The caller asks for a deletion of a version which is not a master:
|
|
||||||
This case is a straight-forward atomic delete.
|
|
||||||
|
|
||||||
Case 2: The caller asks for a deletion of a version which is the master: In
|
|
||||||
this case we need to create a special flag called PHD (as PlaceHolDer)
|
|
||||||
that indicates the master is no longer valid (with a new unique
|
|
||||||
virtual version ID). We force the ordering of operations in a
|
|
||||||
bulkWrite() to first replace the master with the PHD flag and then
|
|
||||||
physically delete the version. If the call fail in between we will be
|
|
||||||
left with a master with a PHD flag. If the call succeeds we try to
|
|
||||||
find if the master with the PHD flag is left alone in such case we
|
|
||||||
delete it otherwise we trigger an asynchronous repair that will spawn
|
|
||||||
after AYNC_REPAIR_TIMEOUT=15s that will reassign the master to the
|
|
||||||
latest version.
|
|
||||||
|
|
||||||
In all cases the physical deletion or the repair of the master are
|
|
||||||
checked against the PHD flag AND the actual unique virtual version
|
|
||||||
ID. We do this to check against potential concurrent deletions,
|
|
||||||
repairs or updates. Only the last writer/deleter has the right to
|
|
||||||
physically perform the operation, otherwise it is superseded by other
|
|
||||||
operations.
|
|
||||||
|
|
||||||
##### Getting an object with a PHD flag
|
|
||||||
|
|
||||||
If the caller is asking for the latest version of an object and the
|
|
||||||
PHD flag is set we perform a search on the bucket to find the latest
|
|
||||||
version and we return it.
|
|
||||||
|
|
||||||
#### Listing Objects
|
|
||||||
|
|
||||||
The mongoclient backend implements a readable key/value stream called
|
|
||||||
*MongoReadStream* that follows the LevelDB duck typing interface used
|
|
||||||
in Arsenal/lib/algos listing algorithms. Note it does not require any
|
|
||||||
LevelDB package.
|
|
||||||
|
|
||||||
#### Generating the UUID
|
|
||||||
|
|
||||||
To avoid race conditions we always (try to) generate a new UUID and we
|
|
||||||
condition the insertion to the non-existence of the document.
|
|
|
@ -1,137 +0,0 @@
|
||||||
const Readable = require('stream').Readable;
|
|
||||||
const MongoUtils = require('./utils');
|
|
||||||
|
|
||||||
class MongoReadStream extends Readable {
|
|
||||||
constructor(c, options, searchOptions) {
|
|
||||||
super({
|
|
||||||
objectMode: true,
|
|
||||||
highWaterMark: 0,
|
|
||||||
});
|
|
||||||
|
|
||||||
if (options.limit === 0) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
const query = {
|
|
||||||
_id: {},
|
|
||||||
};
|
|
||||||
if (options.reverse) {
|
|
||||||
if (options.start) {
|
|
||||||
query._id.$lte = options.start;
|
|
||||||
}
|
|
||||||
if (options.end) {
|
|
||||||
query._id.$gte = options.end;
|
|
||||||
}
|
|
||||||
if (options.gt) {
|
|
||||||
query._id.$lt = options.gt;
|
|
||||||
}
|
|
||||||
if (options.gte) {
|
|
||||||
query._id.$lte = options.gte;
|
|
||||||
}
|
|
||||||
if (options.lt) {
|
|
||||||
query._id.$gt = options.lt;
|
|
||||||
}
|
|
||||||
if (options.lte) {
|
|
||||||
query._id.$gte = options.lte;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if (options.start) {
|
|
||||||
query._id.$gte = options.start;
|
|
||||||
}
|
|
||||||
if (options.end) {
|
|
||||||
query._id.$lte = options.end;
|
|
||||||
}
|
|
||||||
if (options.gt) {
|
|
||||||
query._id.$gt = options.gt;
|
|
||||||
}
|
|
||||||
if (options.gte) {
|
|
||||||
query._id.$gte = options.gte;
|
|
||||||
}
|
|
||||||
if (options.lt) {
|
|
||||||
query._id.$lt = options.lt;
|
|
||||||
}
|
|
||||||
if (options.lte) {
|
|
||||||
query._id.$lte = options.lte;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!Object.keys(query._id).length) {
|
|
||||||
delete query._id;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (searchOptions) {
|
|
||||||
Object.assign(query, searchOptions);
|
|
||||||
}
|
|
||||||
|
|
||||||
this._cursor = c.find(query).sort({
|
|
||||||
_id: options.reverse ? -1 : 1,
|
|
||||||
});
|
|
||||||
if (options.limit && options.limit !== -1) {
|
|
||||||
this._cursor = this._cursor.limit(options.limit);
|
|
||||||
}
|
|
||||||
this._options = options;
|
|
||||||
this._destroyed = false;
|
|
||||||
this.on('end', this._cleanup.bind(this));
|
|
||||||
}
|
|
||||||
|
|
||||||
_read() {
|
|
||||||
if (this._destroyed) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
this._cursor.next((err, doc) => {
|
|
||||||
if (this._destroyed) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (err) {
|
|
||||||
this.emit('error', err);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
let key = undefined;
|
|
||||||
let value = undefined;
|
|
||||||
|
|
||||||
if (doc) {
|
|
||||||
key = doc._id;
|
|
||||||
MongoUtils.unserialize(doc.value);
|
|
||||||
value = JSON.stringify(doc.value);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (key === undefined && value === undefined) {
|
|
||||||
this.push(null);
|
|
||||||
} else if (this._options.keys !== false &&
|
|
||||||
this._options.values === false) {
|
|
||||||
this.push(key);
|
|
||||||
} else if (this._options.keys === false &&
|
|
||||||
this._options.values !== false) {
|
|
||||||
this.push(value);
|
|
||||||
} else {
|
|
||||||
this.push({
|
|
||||||
key,
|
|
||||||
value,
|
|
||||||
});
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
_cleanup() {
|
|
||||||
if (this._destroyed) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
this._destroyed = true;
|
|
||||||
|
|
||||||
this._cursor.close(err => {
|
|
||||||
if (err) {
|
|
||||||
this.emit('error', err);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
this.emit('close');
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
destroy() {
|
|
||||||
return this._cleanup();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = MongoReadStream;
|
|
|
@ -1,30 +0,0 @@
|
||||||
|
|
||||||
function escape(obj) {
|
|
||||||
return JSON.parse(JSON.stringify(obj).
|
|
||||||
replace(/\$/g, '\uFF04').
|
|
||||||
replace(/\./g, '\uFF0E'));
|
|
||||||
}
|
|
||||||
|
|
||||||
function unescape(obj) {
|
|
||||||
return JSON.parse(JSON.stringify(obj).
|
|
||||||
replace(/\uFF04/g, '$').
|
|
||||||
replace(/\uFF0E/g, '.'));
|
|
||||||
}
|
|
||||||
|
|
||||||
function serialize(objMD) {
|
|
||||||
// Tags require special handling since dot and dollar are accepted
|
|
||||||
if (objMD.tags) {
|
|
||||||
// eslint-disable-next-line
|
|
||||||
objMD.tags = escape(objMD.tags);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function unserialize(objMD) {
|
|
||||||
// Tags require special handling
|
|
||||||
if (objMD.tags) {
|
|
||||||
// eslint-disable-next-line
|
|
||||||
objMD.tags = unescape(objMD.tags);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = { escape, unescape, serialize, unserialize };
|
|
|
@ -1,317 +0,0 @@
|
||||||
'use strict'; // eslint-disable-line strict
|
|
||||||
|
|
||||||
const errors = require('../../../errors');
|
|
||||||
const BucketInfo = require('../../../models/BucketInfo');
|
|
||||||
const { getURIComponents, getRequestBody, sendResponse } = require('./utils');
|
|
||||||
|
|
||||||
class BucketdRoutes {
|
|
||||||
/**
|
|
||||||
* Create a new Bucketd routes instance
|
|
||||||
* This class implements the bucketd Metadata protocol and is used in
|
|
||||||
* the Metadata Proxy Server to implement this protocol on top of
|
|
||||||
* various metadata backends.
|
|
||||||
*
|
|
||||||
* Implementation note: the adaptations performed in the methods of
|
|
||||||
* the class MetadataWrapper are not required in this context.
|
|
||||||
* For this reason, the methods of the `client' instance are directly
|
|
||||||
* called from this class, somewhat defeating the encapsulation of the
|
|
||||||
* wrapper.
|
|
||||||
*
|
|
||||||
* @param {Arsenal.storage.metadata.MetadataWrapper} metadataWrapper - to
|
|
||||||
* be used as a translation target for the bucketd protocol.
|
|
||||||
* @param {werelogs.Logger} logger -
|
|
||||||
*/
|
|
||||||
constructor(metadataWrapper, logger) {
|
|
||||||
this._metadataWrapper = metadataWrapper;
|
|
||||||
this._logger = logger;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Metadata Wrapper's wrapper
|
|
||||||
|
|
||||||
// `attributes' context methods
|
|
||||||
|
|
||||||
_getBucketAttributes(req, res, bucketName, logger) {
|
|
||||||
return this._metadataWrapper.client.getBucketAttributes(
|
|
||||||
bucketName, logger, (err, data) => {
|
|
||||||
if (err) {
|
|
||||||
logger.error('Failed to get bucket attributes',
|
|
||||||
{ bucket: bucketName, error: err });
|
|
||||||
return sendResponse(req, res, logger, err);
|
|
||||||
}
|
|
||||||
if (data === undefined) {
|
|
||||||
return sendResponse(req, res, logger,
|
|
||||||
errors.NoSuchBucket);
|
|
||||||
}
|
|
||||||
return sendResponse(req, res, logger, null,
|
|
||||||
BucketInfo.fromObj(data).serialize());
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
_putBucketAttributes(req, res, bucketName, data, logger) {
|
|
||||||
return this._metadataWrapper.client.putBucketAttributes(
|
|
||||||
bucketName, BucketInfo.deSerialize(data), logger, err =>
|
|
||||||
sendResponse(req, res, logger, err));
|
|
||||||
}
|
|
||||||
|
|
||||||
// `bucket' context methods
|
|
||||||
|
|
||||||
_createBucket(req, res, bucketName, data, logger) {
|
|
||||||
return this._metadataWrapper.client.createBucket(
|
|
||||||
bucketName, BucketInfo.deSerialize(data), logger, err =>
|
|
||||||
sendResponse(req, res, logger, err));
|
|
||||||
}
|
|
||||||
|
|
||||||
_deleteBucket(req, res, bucketName, logger) {
|
|
||||||
return this._metadataWrapper.client.deleteBucket(
|
|
||||||
bucketName, logger, err =>
|
|
||||||
sendResponse(req, res, logger, err));
|
|
||||||
}
|
|
||||||
|
|
||||||
_putObject(req, res, bucketName, objectName, objectValue, params, logger) {
|
|
||||||
let parsedValue;
|
|
||||||
try {
|
|
||||||
parsedValue = JSON.parse(objectValue);
|
|
||||||
} catch (err) {
|
|
||||||
logger.error('Malformed JSON value', { value: objectValue });
|
|
||||||
return sendResponse(req, res, logger, errors.BadRequest);
|
|
||||||
}
|
|
||||||
return this._metadataWrapper.client.putObject(
|
|
||||||
bucketName, objectName, parsedValue,
|
|
||||||
params, logger, (err, data) =>
|
|
||||||
sendResponse(req, res, logger, err, data));
|
|
||||||
}
|
|
||||||
|
|
||||||
_getObject(req, res, bucketName, objectName, params, logger) {
|
|
||||||
return this._metadataWrapper.client.getObject(
|
|
||||||
bucketName, objectName, params, logger, (err, data) =>
|
|
||||||
sendResponse(req, res, logger, err, data));
|
|
||||||
}
|
|
||||||
|
|
||||||
_deleteObject(req, res, bucketName, objectName, params, logger) {
|
|
||||||
return this._metadataWrapper.client.deleteObject(
|
|
||||||
bucketName, objectName, params, logger, (err, data) =>
|
|
||||||
sendResponse(req, res, logger, err, data));
|
|
||||||
}
|
|
||||||
|
|
||||||
_listObject(req, res, bucketName, params, logger) {
|
|
||||||
const listingParameters = params || {};
|
|
||||||
if (listingParameters.listingType === undefined) {
|
|
||||||
listingParameters.listingType = 'Delimiter';
|
|
||||||
}
|
|
||||||
if (listingParameters.maxKeys) {
|
|
||||||
listingParameters.maxKeys = Number.parseInt(params.maxKeys, 10);
|
|
||||||
}
|
|
||||||
return this._metadataWrapper.client.listObject(
|
|
||||||
bucketName, listingParameters, logger, (err, data) =>
|
|
||||||
sendResponse(req, res, logger, err, data));
|
|
||||||
}
|
|
||||||
|
|
||||||
_createRequestLogger(req) {
|
|
||||||
const uids = req.headers['x-scal-request-uids'];
|
|
||||||
const logger = uids === undefined ?
|
|
||||||
this._logger.newRequestLogger() :
|
|
||||||
this._logger.newRequestLoggerFromSerializedUids(uids);
|
|
||||||
logger.trace('new request', { method: req.method, url: req.url });
|
|
||||||
return logger;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Internal routes
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Handle routes related to operations on bucket attributes
|
|
||||||
*
|
|
||||||
* @param {http.IncomingMessage} req - request being processed
|
|
||||||
* @param {http.OutgoingMessage} res - response associated to the request
|
|
||||||
* @param {object} uriComponents -
|
|
||||||
* @param {werelogs.Logger} logger -
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
_attributesRoutes(req, res, uriComponents, logger) {
|
|
||||||
if (uriComponents.bucketName === undefined) {
|
|
||||||
logger.error('Missing bucket name for attributes route',
|
|
||||||
{ uriComponents });
|
|
||||||
return sendResponse(req, res, logger, errors.BadRequest);
|
|
||||||
}
|
|
||||||
switch (req.method) {
|
|
||||||
case 'GET':
|
|
||||||
return this._getBucketAttributes(
|
|
||||||
req, res,
|
|
||||||
uriComponents.bucketName, logger, (err, attrs) =>
|
|
||||||
sendResponse(req, res, logger, err, attrs));
|
|
||||||
case 'POST':
|
|
||||||
return getRequestBody(logger, req, (err, body) => {
|
|
||||||
if (err) {
|
|
||||||
return sendResponse(req, res, logger, err);
|
|
||||||
}
|
|
||||||
return this._putBucketAttributes(
|
|
||||||
req, res,
|
|
||||||
uriComponents.bucketName, body, logger, err =>
|
|
||||||
sendResponse(req, res, logger, err));
|
|
||||||
});
|
|
||||||
default:
|
|
||||||
return sendResponse(req, res, logger, errors.RouteNotFound);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Handle routes related to operations on buckets
|
|
||||||
*
|
|
||||||
* @param {http.IncomingMessage} req - request being processed
|
|
||||||
* @param {http.OutgoingMessage} res - response associated to the request
|
|
||||||
* @param {object} uriComponents - URI breakdown of the request to process
|
|
||||||
* @param {string} uriComponents.namespace - Select the control plane with
|
|
||||||
* `_' or the data plane with
|
|
||||||
* `default'.
|
|
||||||
* @param {string} uriComponents.context - Targets the bucket itself with
|
|
||||||
* `attributes' or the content of
|
|
||||||
* the bucket with `bucket'.
|
|
||||||
* @param {string} uriComponents.bucketName - The name of the bucket
|
|
||||||
* @param {string} uriComponents.objectName - the key of the object in the
|
|
||||||
* bucket
|
|
||||||
* @param {werelogs.Logger} logger -
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
_bucketRoutes(req, res, uriComponents, logger) {
|
|
||||||
if (uriComponents.bucketName === undefined) {
|
|
||||||
logger.error('Missing bucket name for bucket route',
|
|
||||||
{ uriComponents });
|
|
||||||
return sendResponse(req, res, logger, errors.BadRequest);
|
|
||||||
}
|
|
||||||
switch (req.method) {
|
|
||||||
case 'GET':
|
|
||||||
return this._listObject(req, res,
|
|
||||||
uriComponents.bucketName,
|
|
||||||
uriComponents.options,
|
|
||||||
logger);
|
|
||||||
case 'DELETE':
|
|
||||||
return this._deleteBucket(req, res,
|
|
||||||
uriComponents.bucketName, logger);
|
|
||||||
case 'POST':
|
|
||||||
return getRequestBody(logger, req, (err, body) => {
|
|
||||||
if (err) {
|
|
||||||
return sendResponse(req, res, logger, err);
|
|
||||||
}
|
|
||||||
return this._createBucket(req, res,
|
|
||||||
uriComponents.bucketName,
|
|
||||||
body, logger);
|
|
||||||
});
|
|
||||||
default:
|
|
||||||
return sendResponse(req, res, logger, errors.RouteNotFound);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Handle routes related to operations on objects
|
|
||||||
*
|
|
||||||
* @param {http.IncomingMessage} req - request being processed
|
|
||||||
* @param {http.OutgoingMessage} res - response associated to the request
|
|
||||||
* @param {object} uriComponents -
|
|
||||||
* @param {werelogs.Logger} logger -
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
_objectRoutes(req, res, uriComponents, logger) {
|
|
||||||
if (uriComponents.bucketName === undefined) {
|
|
||||||
logger.error('Missing bucket name for bucket route',
|
|
||||||
{ uriComponents });
|
|
||||||
return sendResponse(req, res, logger, errors.BadRequest);
|
|
||||||
}
|
|
||||||
switch (req.method) {
|
|
||||||
case 'GET':
|
|
||||||
return this._getObject(req, res,
|
|
||||||
uriComponents.bucketName,
|
|
||||||
uriComponents.objectName,
|
|
||||||
uriComponents.options,
|
|
||||||
logger);
|
|
||||||
case 'DELETE':
|
|
||||||
return this._deleteObject(req, res,
|
|
||||||
uriComponents.bucketName,
|
|
||||||
uriComponents.objectName,
|
|
||||||
uriComponents.options,
|
|
||||||
logger);
|
|
||||||
case 'POST':
|
|
||||||
return getRequestBody(logger, req, (err, body) =>
|
|
||||||
this._putObject(req, res,
|
|
||||||
uriComponents.bucketName,
|
|
||||||
uriComponents.objectName,
|
|
||||||
body,
|
|
||||||
uriComponents.options,
|
|
||||||
logger));
|
|
||||||
default:
|
|
||||||
return sendResponse(req, res, logger, errors.RouteNotFound);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Handle default routes. e.g. URI starting with /default/
|
|
||||||
* (or anything except an underscore)
|
|
||||||
*
|
|
||||||
* @param {http.IncomingMessage} req - request being processed
|
|
||||||
* @param {http.OutgoingMessage} res - response associated to the request
|
|
||||||
* @param {object} uriComponents -
|
|
||||||
* @param {werelogs.Logger} logger -
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
_defaultRoutes(req, res, uriComponents, logger) {
|
|
||||||
switch (uriComponents.context) {
|
|
||||||
case 'leader':
|
|
||||||
case 'informations':
|
|
||||||
case 'parallel':
|
|
||||||
logger.trace(`${uriComponents.context} operation`);
|
|
||||||
return sendResponse(req, res, logger, errors.NotImplemented);
|
|
||||||
case 'metadataInformation':
|
|
||||||
return sendResponse(req, res, logger, undefined,
|
|
||||||
'{"metadataVersion":2}');
|
|
||||||
case 'bucket':
|
|
||||||
logger.trace(`${uriComponents.context} operation`);
|
|
||||||
if (uriComponents.objectName) {
|
|
||||||
return this._objectRoutes(req, res, uriComponents, logger);
|
|
||||||
}
|
|
||||||
return this._bucketRoutes(req, res, uriComponents, logger);
|
|
||||||
case 'attributes':
|
|
||||||
logger.trace(`${uriComponents.context} operation`);
|
|
||||||
return this._attributesRoutes(req, res, uriComponents, logger);
|
|
||||||
default:
|
|
||||||
logger.error('invalid URI', { uriComponents });
|
|
||||||
return sendResponse(req, res, logger, errors.RouteNotFound);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Handle admin routes. e.g. URI starting with /_/
|
|
||||||
*
|
|
||||||
* @param {http.IncomingMessage} req - request being processed
|
|
||||||
* @param {http.OutgoingMessage} res - response associated to the request
|
|
||||||
* @param {object} uriComponents -
|
|
||||||
* @param {werelogs.Logger} logger -
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
_adminRoutes(req, res, uriComponents, logger) {
|
|
||||||
return sendResponse(req, res, logger, errors.NotImplemented);
|
|
||||||
}
|
|
||||||
|
|
||||||
// The route dispatching method
|
|
||||||
|
|
||||||
/**
|
|
||||||
* dispatch the HTTP request to the appropriate handling function.
|
|
||||||
*
|
|
||||||
* @param {http.IncomingMessage} req - request being processed
|
|
||||||
* @param {http.OutgoingMessage} res - response associated to the request
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
dispatch(req, res) {
|
|
||||||
const adminNamespace = '_';
|
|
||||||
const logger = this._createRequestLogger(req);
|
|
||||||
const uriComponents = getURIComponents(req.url, logger);
|
|
||||||
if (!uriComponents) {
|
|
||||||
return sendResponse(req, res, logger, errors.BadRequest);
|
|
||||||
}
|
|
||||||
switch (uriComponents.namespace) {
|
|
||||||
case adminNamespace:
|
|
||||||
return this._adminRoutes(req, res, uriComponents, logger);
|
|
||||||
default: // coincidentally matches the `default' literal namespace as well
|
|
||||||
return this._defaultRoutes(req, res, uriComponents, logger);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = BucketdRoutes;
|
|
|
@ -1,33 +0,0 @@
|
||||||
# Metadata Proxy Server

## Design goals

## Design choices

## Implementation details

## How to run the proxy server

```js
const werelogs = require('werelogs');
const MetadataWrapper = require('arsenal')
    .storage.metadata.MetadataWrapper;
const Server = require('arsenal')
    .storage.metadata.proxy.Server;

const logger = new werelogs.Logger('MetadataProxyServer',
                                   'debug', 'debug');
const metadataWrapper = new MetadataWrapper('mem', {},
                                            null, logger);
const server = new Server(metadataWrapper,
                          {
                              port: 9001,
                              workers: 1,
                          },
                          logger);
server.start(() => {
    logger.info('Metadata Proxy Server successfully started. ' +
                `Using the ${metadataWrapper.implName} backend`);
});
```
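
Once the server above is running, it speaks the bucketd sub-protocol over plain HTTP. As a minimal sketch (assuming the default bind address and the `port: 9001` value configured above), a client can check the advertised metadata version through the `/default/metadataInformation` route using only Node's built-in `http` module:

```js
const http = require('http');

// Ask the proxy for its metadata version; the route replies with a small
// JSON document such as {"metadataVersion":2}.
http.get('http://localhost:9001/default/metadataInformation', res => {
    const chunks = [];
    res.on('data', chunk => chunks.push(chunk));
    res.on('end', () => {
        console.log(res.statusCode, Buffer.concat(chunks).toString());
    });
}).on('error', err => {
    console.error('request failed', err);
});
```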
|
|
|
@ -1,105 +0,0 @@
|
||||||
'use strict'; // eslint-disable-line strict
|
|
||||||
|
|
||||||
const cluster = require('cluster');
|
|
||||||
|
|
||||||
const HttpServer = require('../../../network/http/server');
|
|
||||||
const BucketdRoutes = require('./BucketdRoutes');
|
|
||||||
|
|
||||||
const requiresOneWorker = {
|
|
||||||
// in-memory kvs storage is not shared across processes
|
|
||||||
memorybucket: true,
|
|
||||||
};
|
|
||||||
|
|
||||||
class Server {
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Create a new Metadata Proxy Server instance
|
|
||||||
*
|
|
||||||
* The Metadata Proxy Server is an HTTP server that translates
|
|
||||||
* requests of the bucketd sub-protocol into function calls to
|
|
||||||
* a properly configured MetadataWrapper instance. Such an instance
|
|
||||||
* can use any of the available metadata backends.
|
|
||||||
*
|
|
||||||
* @param {arsenal.storage.metadata.MetadataWrapper} metadataWrapper -
|
|
||||||
* @param {Object} configuration -
|
|
||||||
* @param {number} configuration.port -
|
|
||||||
* @param {number} configuration.workers -
|
|
||||||
* @param {werelogs.Logger} logger -
|
|
||||||
*/
|
|
||||||
constructor(metadataWrapper, configuration, logger) {
|
|
||||||
this._configuration = configuration;
|
|
||||||
if (requiresOneWorker[metadataWrapper.implName] &&
|
|
||||||
this._configuration.workers !== 1) {
|
|
||||||
logger.warn('This metadata backend requires only one worker',
|
|
||||||
{ metadataBackend: metadataWrapper.implName });
|
|
||||||
this._configuration.workers = 1;
|
|
||||||
}
|
|
||||||
this._logger = logger;
|
|
||||||
this._metadataWrapper = metadataWrapper;
|
|
||||||
|
|
||||||
this._proxyRoutes = new BucketdRoutes(metadataWrapper, this._logger);
|
|
||||||
this._httpServer = null;
|
|
||||||
this._installSignalHandlers();
|
|
||||||
}
|
|
||||||
|
|
||||||
_cleanup() {
|
|
||||||
if (cluster.isWorker) {
|
|
||||||
this._logger.info('Server worker shutting down...');
|
|
||||||
this._httpServer.stop();
|
|
||||||
} else {
|
|
||||||
this._logger.info('Server shutting down...');
|
|
||||||
}
|
|
||||||
return process.exit(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
_installSignalHandlers() {
|
|
||||||
process.on('SIGINT', () => { this._cleanup(); });
|
|
||||||
process.on('SIGHUP', () => { this._cleanup(); });
|
|
||||||
process.on('SIGQUIT', () => { this._cleanup(); });
|
|
||||||
process.on('SIGTERM', () => { this._cleanup(); });
|
|
||||||
process.on('SIGPIPE', () => {});
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Start the Metadata Proxy Server instance
|
|
||||||
*
|
|
||||||
* @param {Function} cb - called with no argument when the onListening event
|
|
||||||
* is triggered
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
start(cb) {
|
|
||||||
if (cluster.isMaster) {
|
|
||||||
for (let i = 0; i < this._configuration.workers; ++i) {
|
|
||||||
cluster.fork();
|
|
||||||
}
|
|
||||||
cluster.on('disconnect', worker => {
|
|
||||||
this._logger
|
|
||||||
.info(`worker ${worker.process.pid} exited, respawning.`);
|
|
||||||
cluster.fork();
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
this._httpServer = new HttpServer(this._configuration.port,
|
|
||||||
this._logger);
|
|
||||||
if (this._configuration.bindAddress) {
|
|
||||||
this._httpServer.setBindAddress(
|
|
||||||
this._configuration.bindAddress);
|
|
||||||
}
|
|
||||||
this._httpServer
|
|
||||||
.onRequest((req, res) => this._proxyRoutes.dispatch(req, res))
|
|
||||||
.onListening(() => {
|
|
||||||
this._logger.info(
|
|
||||||
'Metadata Proxy Server now listening on' +
|
|
||||||
` port ${this._configuration.port}`);
|
|
||||||
if (cb) {
|
|
||||||
return this._metadataWrapper.setup(cb);
|
|
||||||
}
|
|
||||||
return this._metadataWrapper.setup(() => {
|
|
||||||
this._logger.info('MetadataWrapper setup complete.');
|
|
||||||
});
|
|
||||||
})
|
|
||||||
.start();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = Server;
|
|
|
@ -1,183 +0,0 @@
|
||||||
const url = require('url');
|
|
||||||
const querystring = require('querystring');
|
|
||||||
const errors = require('../../../errors');
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extracts components from URI.
|
|
||||||
* @param {string} uri - uri part of the received request
|
|
||||||
* @param {werelogs.Logger} logger -
|
|
||||||
* @return {object} ret - URI breakdown of the request to process
|
|
||||||
* @return {string} ret.namespace - targeted plane, control plane is targeted
|
|
||||||
* with `_' and the data plane with `default'.
|
|
||||||
* @return {string} ret.context - Targets the bucket itself with
|
|
||||||
* `attributes' or the content of
|
|
||||||
* the bucket with `bucket'.
|
|
||||||
* @return {string} ret.bucketName - The name of the bucket
|
|
||||||
* @return {string} ret.objectName - the key of the object in the bucket
|
|
||||||
*/
|
|
||||||
function getURIComponents(uri, logger) {
|
|
||||||
try {
|
|
||||||
if (uri.charAt(0) !== '/') {
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
|
|
||||||
const { pathname, query } = url.parse(uri);
|
|
||||||
const options = query ? querystring.parse(query) : {};
|
|
||||||
const typeIndex = pathname.indexOf('/', 1);
|
|
||||||
const bucketIndex = pathname.indexOf('/', typeIndex + 1);
|
|
||||||
const objectIndex = pathname.indexOf('/', bucketIndex + 1);
|
|
||||||
|
|
||||||
if (typeIndex === -1 || typeIndex === pathname.length - 1) {
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
if (bucketIndex === -1) {
|
|
||||||
return {
|
|
||||||
namespace: pathname.substring(1, typeIndex),
|
|
||||||
context: pathname.substring(typeIndex + 1),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
if (bucketIndex === pathname.length - 1) {
|
|
||||||
return {
|
|
||||||
namespace: pathname.substring(1, typeIndex),
|
|
||||||
context: pathname.substring(typeIndex + 1, bucketIndex),
|
|
||||||
};
|
|
||||||
}
|
|
||||||
if (objectIndex === -1) {
|
|
||||||
return {
|
|
||||||
namespace: pathname.substring(1, typeIndex),
|
|
||||||
context: pathname.substring(typeIndex + 1, bucketIndex),
|
|
||||||
bucketName: pathname.substring(bucketIndex + 1),
|
|
||||||
options,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
if (objectIndex === pathname.length - 1) {
|
|
||||||
return {
|
|
||||||
namespace: pathname.substring(1, typeIndex),
|
|
||||||
context: pathname.substring(typeIndex + 1, bucketIndex),
|
|
||||||
bucketName: pathname.substring(bucketIndex + 1, objectIndex),
|
|
||||||
options,
|
|
||||||
};
|
|
||||||
}
|
|
||||||
return {
|
|
||||||
namespace: pathname.substring(1, typeIndex),
|
|
||||||
context: pathname.substring(typeIndex + 1, bucketIndex),
|
|
||||||
bucketName: pathname.substring(bucketIndex + 1, objectIndex),
|
|
||||||
objectName: decodeURIComponent(pathname.substring(objectIndex + 1)),
|
|
||||||
options,
|
|
||||||
};
|
|
||||||
} catch (ex) {
|
|
||||||
logger.error('Invalid URI: failed to parse',
|
|
||||||
{ uri, error: ex, errorStack: ex.stack,
|
|
||||||
message: ex.message });
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
}
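
// Illustrative sketch (not part of the original module): for a request such as
// GET /default/bucket/foo/bar%2Fbaz?prefix=bar%2F, getURIComponents(uri, logger)
// is expected to break the URI down roughly as:
//   {
//       namespace: 'default',
//       context: 'bucket',
//       bucketName: 'foo',
//       objectName: 'bar/baz',
//       options: { prefix: 'bar/' },
//   }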
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Extracts the body of the request through a callback
|
|
||||||
* @param {werelogs.Logger} logger - instance of the logger that will emit the
|
|
||||||
* log entry
|
|
||||||
* @param {http.IncomingMessage} request - request received from bucketclient
|
|
||||||
* @param {Function} cb - function which has an interest in the request body.
|
|
||||||
* The first parameter is err and may be falsy
|
|
||||||
* The second parameter is the body of the request
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
function getRequestBody(logger, request, cb) {
|
|
||||||
const body = [];
|
|
||||||
let bodyLen = 0;
|
|
||||||
request.on('data', data => {
|
|
||||||
body.push(data);
|
|
||||||
bodyLen += data.length;
|
|
||||||
}).on('error', cb).on('end', () => {
|
|
||||||
cb(null, Buffer.concat(body, bodyLen).toString());
|
|
||||||
}).on('close', () => {
|
|
||||||
logger.error('Connection closed by remote peer');
|
|
||||||
/* Execution will eventually reach the sendResponse code which will
|
|
||||||
* trigger the proper cleanup as the remote peer already hung up and
|
|
||||||
* nobody is on the line to get the message */
|
|
||||||
cb(errors.BadRequest);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Emit a log entry corresponding to the end of the request
|
|
||||||
*
|
|
||||||
* @param {werelogs.Logger} logger - instance of the logger that will emit the
|
|
||||||
* log entry
|
|
||||||
* @param {http.IncomingMessage} req - request being processed
|
|
||||||
 * @param {number} statusCode - HTTP status code sent back to the client
 * @param {string} statusMessage - HTTP status message sent back to the client
|
|
||||||
* @return {undefined}
|
|
||||||
*/
|
|
||||||
function _logRequestEnd(logger, req, statusCode, statusMessage) {
|
|
||||||
const info = {
|
|
||||||
clientIp: req.socket.remoteAddress,
|
|
||||||
clientPort: req.socket.remotePort,
|
|
||||||
httpMethod: req.method,
|
|
||||||
httpURL: req.url,
|
|
||||||
httpCode: statusCode,
|
|
||||||
httpMessage: statusMessage,
|
|
||||||
};
|
|
||||||
logger.end('finished handling request', info);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
 * Request processing exit point; sends the specified data and/or error code
 * back to the client
 *
 * @param {http.IncomingMessage} req - request being processed
 * @param {http.OutgoingMessage} res - response associated to the request
 * @param {werelogs.Logger} log - instance of the logger to use
 * @param {Arsenal.Error} err - if not null, defines the HTTP status
 *                              code and message
 * @param {string} data - if not null, used as the response body. If `data'
 *                        isn't a string, it is considered a JSON object and
 *                        its content gets serialized before being sent.
 * @return {undefined}
 */
|
|
||||||
function sendResponse(req, res, log, err, data) {
|
|
||||||
let statusCode;
|
|
||||||
let statusMessage;
|
|
||||||
if (err) {
|
|
||||||
statusCode = err.code;
|
|
||||||
statusMessage = err.message;
|
|
||||||
} else {
|
|
||||||
statusCode = errors.ok.code;
|
|
||||||
statusMessage = errors.ok.message;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (data) {
|
|
||||||
let resData = data;
|
|
||||||
if (typeof resData === 'object') {
|
|
||||||
resData = JSON.stringify(data);
|
|
||||||
} else if (typeof resData === 'number') {
|
|
||||||
resData = resData.toString();
|
|
||||||
}
|
|
||||||
/*
 * Encoding data to binary provides a hot path to write data
 * directly to the socket, without node.js trying to encode the
 * data over and over again.
 */
const rawData = Buffer.from(resData, 'utf8');
/*
 * Using Buffer.byteLength is not required here: because the data is
 * binary encoded, rawData.length gives the exact byte length.
 */
|
|
||||||
res.writeHead(statusCode, statusMessage, {
|
|
||||||
'content-length': rawData.length,
|
|
||||||
});
|
|
||||||
res.write(rawData);
|
|
||||||
} else {
|
|
||||||
res.writeHead(statusCode, statusMessage, { 'content-length': 0 });
|
|
||||||
}
|
|
||||||
return res.end(() => {
|
|
||||||
_logRequestEnd(log, req, statusCode, statusMessage);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
getURIComponents,
|
|
||||||
getRequestBody,
|
|
||||||
sendResponse,
|
|
||||||
};
|
|
File diff suppressed because it is too large
package.json
|
@ -1,35 +1,32 @@
|
||||||
{
|
{
|
||||||
"name": "arsenal",
|
"name": "arsenal",
|
||||||
"engines": {
|
"engines": {
|
||||||
"node": ">=8"
|
"node": ">=6.9.5"
|
||||||
},
|
},
|
||||||
"version": "8.0.3",
|
"version": "7.4.0",
|
||||||
"description": "Common utilities for the S3 project components",
|
"description": "Common utilities for the S3 project components",
|
||||||
"main": "index.js",
|
"main": "index.js",
|
||||||
"repository": {
|
"repository": {
|
||||||
"type": "git",
|
"type": "git",
|
||||||
"url": "git+https://github.com/scality/Arsenal.git"
|
"url": "git+https://github.com/scality/Arsenal.git"
|
||||||
},
|
},
|
||||||
"author": "Scality Inc.",
|
"author": "Giorgio Regni",
|
||||||
"license": "Apache-2.0",
|
"license": "Apache-2.0",
|
||||||
"bugs": {
|
"bugs": {
|
||||||
"url": "https://github.com/scality/Arsenal/issues"
|
"url": "https://github.com/scality/Arsenal/issues"
|
||||||
},
|
},
|
||||||
"homepage": "https://github.com/scality/Arsenal#readme",
|
"homepage": "https://github.com/scality/Arsenal#readme",
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"JSONStream": "^1.0.0",
|
|
||||||
"ajv": "4.10.0",
|
"ajv": "4.10.0",
|
||||||
"async": "~2.1.5",
|
"async": "~2.1.5",
|
||||||
"bson": "2.0.4",
|
|
||||||
"debug": "~2.3.3",
|
"debug": "~2.3.3",
|
||||||
"diskusage": "^0.2.2",
|
"diskusage": "^0.2.2",
|
||||||
"fcntl": "github:scality/node-fcntl",
|
|
||||||
"ioredis": "2.4.0",
|
"ioredis": "2.4.0",
|
||||||
"ipaddr.js": "1.2.0",
|
"ipaddr.js": "1.2.0",
|
||||||
"joi": "^10.6",
|
"joi": "^10.6",
|
||||||
|
"JSONStream": "^1.0.0",
|
||||||
"level": "~1.6.0",
|
"level": "~1.6.0",
|
||||||
"level-sublevel": "~6.6.1",
|
"level-sublevel": "~6.6.1",
|
||||||
"mongodb": "^3.0.1",
|
|
||||||
"node-forge": "^0.7.1",
|
"node-forge": "^0.7.1",
|
||||||
"simple-glob": "^0.1",
|
"simple-glob": "^0.1",
|
||||||
"socket.io": "~1.7.3",
|
"socket.io": "~1.7.3",
|
||||||
|
|
|
@ -1,105 +0,0 @@
|
||||||
'use strict'; // eslint-disable-line strict
|
|
||||||
|
|
||||||
const assert = require('assert');
|
|
||||||
|
|
||||||
const RedisClient = require('../../../lib/metrics/RedisClient');
|
|
||||||
const { backbeat } = require('../../../');
|
|
||||||
|
|
||||||
// expirations
|
|
||||||
const EXPIRY = 86400; // 24 hours
|
|
||||||
const THROUGHPUT_EXPIRY = 900; // 15 minutes
|
|
||||||
|
|
||||||
// setup redis client
|
|
||||||
const config = {
|
|
||||||
host: '127.0.0.1',
|
|
||||||
port: 6379,
|
|
||||||
enableOfflineQueue: false,
|
|
||||||
};
|
|
||||||
const fakeLogger = {
|
|
||||||
trace: () => {},
|
|
||||||
error: () => {},
|
|
||||||
};
|
|
||||||
const redisClient = new RedisClient(config, fakeLogger);
|
|
||||||
|
|
||||||
// setup stats model
|
|
||||||
const sites = ['site1', 'site2'];
|
|
||||||
const metrics = new backbeat.Metrics({
|
|
||||||
redisConfig: config,
|
|
||||||
validSites: ['site1', 'site2', 'all'],
|
|
||||||
internalStart: Date.now() - (EXPIRY * 1000), // 24 hours ago.
|
|
||||||
}, fakeLogger);
|
|
||||||
|
|
||||||
// Since many methods were overridden, these tests should validate the changes
|
|
||||||
// made to the original methods
|
|
||||||
describe('Metrics class', () => {
|
|
||||||
afterEach(() => redisClient.clear(() => {}));
|
|
||||||
|
|
||||||
it('should not crash on empty results', done => {
|
|
||||||
const redisKeys = {
|
|
||||||
ops: 'bb:crr:ops',
|
|
||||||
bytes: 'bb:crr:bytes',
|
|
||||||
opsDone: 'bb:crr:opsdone',
|
|
||||||
bytesDone: 'bb:crr:bytesdone',
|
|
||||||
bytesFail: 'bb:crr:bytesfail',
|
|
||||||
opsFail: 'bb:crr:opsfail',
|
|
||||||
failedCRR: 'bb:crr:failed',
|
|
||||||
opsPending: 'bb:crr:bytespending',
|
|
||||||
bytesPending: 'bb:crr:opspending',
|
|
||||||
};
|
|
||||||
const routes = backbeat.routes(redisKeys, sites);
|
|
||||||
const details = routes.find(route =>
|
|
||||||
route.category === 'metrics' && route.type === 'all');
|
|
||||||
details.site = 'all';
|
|
||||||
metrics.getAllMetrics(details, (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
const expected = {
|
|
||||||
pending: {
|
|
||||||
description: 'Number of pending replication operations ' +
|
|
||||||
'(count) and bytes (size)',
|
|
||||||
results: {
|
|
||||||
count: 0,
|
|
||||||
size: 0,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
backlog: {
|
|
||||||
description: 'Number of incomplete replication operations' +
|
|
||||||
' (count) and number of incomplete bytes transferred' +
|
|
||||||
' (size)',
|
|
||||||
results: {
|
|
||||||
count: 0,
|
|
||||||
size: 0,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
completions: {
|
|
||||||
description: 'Number of completed replication operations' +
|
|
||||||
' (count) and number of bytes transferred (size) in ' +
|
|
||||||
`the last ${EXPIRY} seconds`,
|
|
||||||
results: {
|
|
||||||
count: 0,
|
|
||||||
size: 0,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
failures: {
|
|
||||||
description: 'Number of failed replication operations ' +
|
|
||||||
`(count) and bytes (size) in the last ${EXPIRY} ` +
|
|
||||||
'seconds',
|
|
||||||
results: {
|
|
||||||
count: 0,
|
|
||||||
size: 0,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
throughput: {
|
|
||||||
description: 'Current throughput for replication' +
|
|
||||||
' operations in ops/sec (count) and bytes/sec (size) ' +
|
|
||||||
`in the last ${THROUGHPUT_EXPIRY} seconds`,
|
|
||||||
results: {
|
|
||||||
count: '0.00',
|
|
||||||
size: '0.00',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
assert.deepStrictEqual(res, expected);
|
|
||||||
done();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
|
@ -1,276 +0,0 @@
|
||||||
'use strict'; // eslint-disable-line strict
|
|
||||||
|
|
||||||
const werelogs = require('werelogs');
|
|
||||||
const assert = require('assert');
|
|
||||||
const async = require('async');
|
|
||||||
|
|
||||||
const logger = new werelogs.Logger('MetadataProxyServer', 'debug', 'debug');
|
|
||||||
const MetadataWrapper =
|
|
||||||
require('../../../lib/storage/metadata/MetadataWrapper');
|
|
||||||
const BucketRoutes =
|
|
||||||
require('../../../lib/storage/metadata/proxy/BucketdRoutes');
|
|
||||||
const metadataWrapper = new MetadataWrapper('mem', {}, null, logger);
|
|
||||||
const { RequestDispatcher } = require('../../utils/mdProxyUtils');
|
|
||||||
|
|
||||||
const routes = new BucketRoutes(metadataWrapper, logger);
|
|
||||||
const dispatcher = new RequestDispatcher(routes);
|
|
||||||
|
|
||||||
const Bucket = 'test';
|
|
||||||
const bucketInfo = {
|
|
||||||
acl: {
|
|
||||||
Canned: 'private',
|
|
||||||
FULL_CONTROL: [],
|
|
||||||
WRITE: [],
|
|
||||||
WRITE_ACP: [],
|
|
||||||
READ: [],
|
|
||||||
READ_ACP: [],
|
|
||||||
},
|
|
||||||
name: Bucket,
|
|
||||||
owner: '9d8fe19a78974c56dceb2ea4a8f01ed0f5fecb9d29f80e9e3b84104e4a3ea520',
|
|
||||||
ownerDisplayName: 'anonymousCoward',
|
|
||||||
creationDate: '2018-06-04T17:45:42.592Z',
|
|
||||||
mdBucketModelVersion: 8,
|
|
||||||
transient: false,
|
|
||||||
deleted: false,
|
|
||||||
serverSideEncryption: null,
|
|
||||||
versioningConfiguration: null,
|
|
||||||
websiteConfiguration: null,
|
|
||||||
locationConstraint: 'us-east-1',
|
|
||||||
readLocationConstraint: 'us-east-1',
|
|
||||||
cors: null,
|
|
||||||
replicationConfiguration: null,
|
|
||||||
lifecycleConfiguration: null,
|
|
||||||
uid: 'fea97818-6a9a-11e8-9777-e311618cc5d4',
|
|
||||||
};
|
|
||||||
|
|
||||||
const objects = [
|
|
||||||
'aaa',
|
|
||||||
'bbb/xaa',
|
|
||||||
'bbb/xbb',
|
|
||||||
'bbb/xcc',
|
|
||||||
'ccc',
|
|
||||||
'ddd',
|
|
||||||
];
|
|
||||||
|
|
||||||
function _getExpectedListing(prefix, objects) {
|
|
||||||
const filtered = objects.map(key => {
|
|
||||||
const deprefixed = key.slice(prefix.length);
|
|
||||||
return deprefixed.replace(/[/].*/, '/');
|
|
||||||
});
|
|
||||||
const keySet = {};
|
|
||||||
return filtered.filter(key => {
|
|
||||||
if (keySet[key]) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
if (key === '') {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
keySet[key] = true;
|
|
||||||
return true;
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
function _listingURL(prefix, marker) {
|
|
||||||
const reSlash = /[/]/g;
|
|
||||||
const escapedPrefix = prefix.replace(reSlash, '%2F');
|
|
||||||
const escapedMarker = marker.replace(reSlash, '%2F');
|
|
||||||
return `/default/bucket/${Bucket}?delimiter=%2F&prefix=` +
|
|
||||||
`${escapedPrefix}&maxKeys=1&marker=${escapedMarker}`;
|
|
||||||
}
|
|
||||||
|
|
||||||
function _listObjects(prefix, objects, cb) {
|
|
||||||
const keys = _getExpectedListing(prefix, objects);
|
|
||||||
const markers = keys.slice(0);
|
|
||||||
markers.unshift(undefined);
|
|
||||||
const lastKey = keys[keys.length - 1];
|
|
||||||
const listing = keys.map((key, index) => ({
|
|
||||||
key,
|
|
||||||
marker: markers[index],
|
|
||||||
NextMarker: markers[index + 1],
|
|
||||||
IsTruncated: key !== lastKey,
|
|
||||||
isPrefix: key.endsWith('/'),
|
|
||||||
}));
|
|
||||||
async.mapLimit(listing, 5, (obj, next) => {
|
|
||||||
const currentMarker = obj.marker === undefined ? '' : obj.marker;
|
|
||||||
dispatcher.get(_listingURL(prefix, prefix + currentMarker),
|
|
||||||
(err, response, body) => {
|
|
||||||
if (err) {
|
|
||||||
return next(err);
|
|
||||||
}
|
|
||||||
if (obj.isPrefix) {
|
|
||||||
assert.strictEqual(body.Contents.length, 0);
|
|
||||||
assert.strictEqual(body.CommonPrefixes.length,
|
|
||||||
1);
|
|
||||||
assert.strictEqual(body.CommonPrefixes[0],
|
|
||||||
prefix + obj.key);
|
|
||||||
} else {
|
|
||||||
assert.strictEqual(body.Contents.length, 1);
|
|
||||||
assert.strictEqual(body.CommonPrefixes.length,
|
|
||||||
0);
|
|
||||||
assert.strictEqual(body.Contents[0].key,
|
|
||||||
prefix + obj.key);
|
|
||||||
}
|
|
||||||
assert.strictEqual(body.IsTruncated,
|
|
||||||
obj.IsTruncated);
|
|
||||||
if (body.IsTruncated) {
|
|
||||||
assert.strictEqual(body.NextMarker,
|
|
||||||
prefix + obj.NextMarker);
|
|
||||||
}
|
|
||||||
return next();
|
|
||||||
});
|
|
||||||
}, err => cb(err));
|
|
||||||
}
|
|
||||||
|
|
||||||
function _createObjects(objects, cb) {
|
|
||||||
async.mapLimit(objects, 5, (key, next) => {
|
|
||||||
dispatcher.post(`/default/bucket/${Bucket}/${key}`,
|
|
||||||
{ key }, next);
|
|
||||||
}, err => {
|
|
||||||
cb(err);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
function _readObjects(objects, cb) {
|
|
||||||
async.mapLimit(objects, 5, (key, next) => {
|
|
||||||
dispatcher.get(`/default/bucket/${Bucket}/${key}`,
|
|
||||||
(err, response, body) => {
|
|
||||||
assert.deepStrictEqual(body.key, key);
|
|
||||||
next(err);
|
|
||||||
});
|
|
||||||
}, err => {
|
|
||||||
cb(err);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
function _deleteObjects(objects, cb) {
|
|
||||||
async.mapLimit(objects, 5, (key, next) => {
|
|
||||||
dispatcher.delete(`/default/bucket/${Bucket}/${key}`,
|
|
||||||
err => next(err));
|
|
||||||
}, err => {
|
|
||||||
cb(err);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
describe('Basic Metadata Proxy Server test',
|
|
||||||
function bindToThis() {
|
|
||||||
this.timeout(10000);
|
|
||||||
it('Should get the metadataInformation', done => {
|
|
||||||
dispatcher.get('/default/metadataInformation',
|
|
||||||
(err, response, body) => {
|
|
||||||
if (err) {
|
|
||||||
return done(err);
|
|
||||||
}
|
|
||||||
assert.deepStrictEqual(
|
|
||||||
body, { metadataVersion: 2 });
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('Basic Metadata Proxy Server CRUD test', function bindToThis() {
|
|
||||||
this.timeout(10000);
|
|
||||||
|
|
||||||
beforeEach(done => {
|
|
||||||
dispatcher.post(`/default/bucket/${Bucket}`, bucketInfo,
|
|
||||||
done);
|
|
||||||
});
|
|
||||||
|
|
||||||
afterEach(done => {
|
|
||||||
dispatcher.delete(`/default/bucket/${Bucket}`, done);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('Should get the bucket attributes', done => {
|
|
||||||
dispatcher.get(`/default/attributes/${Bucket}`,
|
|
||||||
(err, response, body) => {
|
|
||||||
if (err) {
|
|
||||||
return done(err);
|
|
||||||
}
|
|
||||||
assert.deepStrictEqual(body.name,
|
|
||||||
bucketInfo.name);
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('Should crud an object', done => {
|
|
||||||
async.waterfall([
|
|
||||||
next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
|
|
||||||
{ foo: 'gabu' }, err => next(err)),
|
|
||||||
next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
|
|
||||||
(err, response, body) => {
|
|
||||||
if (!err) {
    assert.deepStrictEqual(body.foo, 'gabu');
}
next(err);
|
|
||||||
}),
|
|
||||||
next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
|
|
||||||
{ foo: 'zome' }, err => next(err)),
|
|
||||||
next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
|
|
||||||
(err, response, body) => {
|
|
||||||
if (!err) {
    assert.deepStrictEqual(body.foo, 'zome');
}
next(err);
|
|
||||||
}),
|
|
||||||
next => dispatcher.delete(`/default/bucket/${Bucket}/test1`,
|
|
||||||
err => next(err)),
|
|
||||||
], err => done(err));
|
|
||||||
});
|
|
||||||
|
|
||||||
it('Should list objects', done => {
|
|
||||||
async.waterfall([
|
|
||||||
next => _createObjects(objects, next),
|
|
||||||
next => _readObjects(objects, next),
|
|
||||||
next => _listObjects('', objects, next),
|
|
||||||
next => _listObjects('bbb/', objects, next),
|
|
||||||
next => _deleteObjects(objects, next),
|
|
||||||
], err => {
|
|
||||||
done(err);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('Should update bucket properties', done => {
|
|
||||||
dispatcher.get(
|
|
||||||
`/default/attributes/${Bucket}`, (err, response, body) => {
|
|
||||||
assert.strictEqual(err, null);
|
|
||||||
const bucketInfo = body;
|
|
||||||
const newOwnerDisplayName = 'divertedfrom';
|
|
||||||
bucketInfo.ownerDisplayName = newOwnerDisplayName;
|
|
||||||
dispatcher.post(
|
|
||||||
`/default/attributes/${Bucket}`, bucketInfo, err => {
|
|
||||||
assert.strictEqual(err, null);
|
|
||||||
dispatcher.get(
|
|
||||||
`/default/attributes/${Bucket}`,
|
|
||||||
(err, response, body) => {
|
|
||||||
assert.strictEqual(err, null);
|
|
||||||
const newBucketInfo = body;
|
|
||||||
assert.strictEqual(
|
|
||||||
newBucketInfo.ownerDisplayName,
|
|
||||||
newOwnerDisplayName);
|
|
||||||
done(null);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('Should fail to list a non-existing bucket', done => {
|
|
||||||
dispatcher.get('/default/bucket/nonexisting',
|
|
||||||
(err, response) => {
|
|
||||||
assert.strictEqual(
|
|
||||||
response.responseHead.statusCode,
|
|
||||||
404);
|
|
||||||
done(err);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('Should fail to get attributes from a non-existing bucket', done => {
|
|
||||||
dispatcher.get('/default/attributes/nonexisting',
|
|
||||||
(err, response) => {
|
|
||||||
assert.strictEqual(
|
|
||||||
response.responseHead.statusCode,
|
|
||||||
404);
|
|
||||||
done(err);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
|
@ -1,246 +0,0 @@
|
||||||
'use strict'; // eslint-disable-line strict
|
|
||||||
|
|
||||||
const assert = require('assert');
|
|
||||||
const async = require('async');
|
|
||||||
|
|
||||||
const RedisClient = require('../../../lib/metrics/RedisClient');
|
|
||||||
const StatsModel = require('../../../lib/metrics/StatsModel');
|
|
||||||
|
|
||||||
// setup redis client
|
|
||||||
const config = {
|
|
||||||
host: '127.0.0.1',
|
|
||||||
port: 6379,
|
|
||||||
enableOfflineQueue: false,
|
|
||||||
};
|
|
||||||
const fakeLogger = {
|
|
||||||
trace: () => {},
|
|
||||||
error: () => {},
|
|
||||||
};
|
|
||||||
const redisClient = new RedisClient(config, fakeLogger);
|
|
||||||
|
|
||||||
// setup stats model
|
|
||||||
const STATS_INTERVAL = 300; // 5 minutes
|
|
||||||
const STATS_EXPIRY = 86400; // 24 hours
|
|
||||||
const statsModel = new StatsModel(redisClient, STATS_INTERVAL, STATS_EXPIRY);
|
|
||||||
|
|
||||||
function setExpectedStats(expected) {
|
|
||||||
return expected.concat(
|
|
||||||
Array((STATS_EXPIRY / STATS_INTERVAL) - expected.length).fill(0));
|
|
||||||
}
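
// Sizing note (illustrative): with STATS_INTERVAL = 300 and STATS_EXPIRY =
// 86400 as configured above, the stats window spans 86400 / 300 = 288
// intervals, so setExpectedStats([9]) yields a 288-entry array starting with
// 9 and padded with zeroes.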
|
|
||||||
|
|
||||||
// Since many methods were overridden, these tests should validate the changes
|
|
||||||
// made to the original methods
|
|
||||||
describe('StatsModel class', () => {
|
|
||||||
const id = 'arsenal-test';
|
|
||||||
const id2 = 'test-2';
|
|
||||||
const id3 = 'test-3';
|
|
||||||
|
|
||||||
afterEach(() => redisClient.clear(() => {}));
|
|
||||||
|
|
||||||
it('should convert a 2D array\'s columns into rows and vice versa using _zip',
|
|
||||||
() => {
|
|
||||||
const arrays = [
|
|
||||||
[1, 2, 3],
|
|
||||||
[4, 5, 6],
|
|
||||||
[7, 8, 9],
|
|
||||||
];
|
|
||||||
|
|
||||||
const res = statsModel._zip(arrays);
|
|
||||||
const expected = [
|
|
||||||
[1, 4, 7],
|
|
||||||
[2, 5, 8],
|
|
||||||
[3, 6, 9],
|
|
||||||
];
|
|
||||||
|
|
||||||
assert.deepStrictEqual(res, expected);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('_zip should return an empty array if given an invalid array', () => {
|
|
||||||
const arrays = [];
|
|
||||||
|
|
||||||
const res = statsModel._zip(arrays);
|
|
||||||
|
|
||||||
assert.deepStrictEqual(res, []);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('_getCount should return an array of all valid integer values',
|
|
||||||
() => {
|
|
||||||
const res = statsModel._getCount([
|
|
||||||
[null, '1'],
|
|
||||||
[null, '2'],
|
|
||||||
[null, null],
|
|
||||||
]);
|
|
||||||
assert.deepStrictEqual(res, setExpectedStats([1, 2, 0]));
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should correctly record a new request with the default increment of one',
|
|
||||||
done => {
|
|
||||||
async.series([
|
|
||||||
next => {
|
|
||||||
statsModel.reportNewRequest(id, (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
|
|
||||||
const expected = [[null, 1], [null, 1]];
|
|
||||||
assert.deepStrictEqual(res, expected);
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
next => {
|
|
||||||
statsModel.reportNewRequest(id, (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
|
|
||||||
const expected = [[null, 2], [null, 1]];
|
|
||||||
assert.deepStrictEqual(res, expected);
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
], done);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should record new requests with specified increment amounts', done => {
|
|
||||||
function noop() {}
|
|
||||||
|
|
||||||
async.series([
|
|
||||||
next => {
|
|
||||||
statsModel.reportNewRequest(id, 9);
|
|
||||||
statsModel.getStats(fakeLogger, id, (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
|
|
||||||
assert.deepStrictEqual(res.requests, setExpectedStats([9]));
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
next => {
|
|
||||||
statsModel.reportNewRequest(id);
|
|
||||||
statsModel.getStats(fakeLogger, id, (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
|
|
||||||
assert.deepStrictEqual(res.requests,
|
|
||||||
setExpectedStats([10]));
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
next => {
|
|
||||||
statsModel.reportNewRequest(id, noop);
|
|
||||||
statsModel.getStats(fakeLogger, id, (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
|
|
||||||
assert.deepStrictEqual(res.requests,
|
|
||||||
setExpectedStats([11]));
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
], done);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should correctly record a 500 on the server', done => {
|
|
||||||
statsModel.report500(id, (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
|
|
||||||
const expected = [[null, 1], [null, 1]];
|
|
||||||
assert.deepStrictEqual(res, expected);
|
|
||||||
done();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should respond with total requests as an array', done => {
|
|
||||||
async.series([
|
|
||||||
next => {
|
|
||||||
statsModel.reportNewRequest(id, err => {
|
|
||||||
assert.ifError(err);
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
next => {
|
|
||||||
statsModel.report500(id, err => {
|
|
||||||
assert.ifError(err);
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
next => {
|
|
||||||
statsModel.getStats(fakeLogger, id, (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
|
|
||||||
const expected = {
|
|
||||||
'requests': setExpectedStats([1]),
|
|
||||||
'500s': setExpectedStats([1]),
|
|
||||||
'sampleDuration': STATS_EXPIRY,
|
|
||||||
};
|
|
||||||
assert.deepStrictEqual(res, expected);
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
], done);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should not crash on empty results', done => {
|
|
||||||
async.series([
|
|
||||||
next => {
|
|
||||||
statsModel.getStats(fakeLogger, id, (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
const expected = {
|
|
||||||
'requests': setExpectedStats([]),
|
|
||||||
'500s': setExpectedStats([]),
|
|
||||||
'sampleDuration': STATS_EXPIRY,
|
|
||||||
};
|
|
||||||
assert.deepStrictEqual(res, expected);
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
next => {
|
|
||||||
statsModel.getAllStats(fakeLogger, id, (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
const expected = {
|
|
||||||
'requests': setExpectedStats([]),
|
|
||||||
'500s': setExpectedStats([]),
|
|
||||||
'sampleDuration': STATS_EXPIRY,
|
|
||||||
};
|
|
||||||
assert.deepStrictEqual(res, expected);
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
], done);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return a zero-filled array if no ids are passed to getAllStats',
|
|
||||||
done => {
|
|
||||||
statsModel.getAllStats(fakeLogger, [], (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
|
|
||||||
assert.deepStrictEqual(res.requests, setExpectedStats([]));
|
|
||||||
assert.deepStrictEqual(res['500s'], setExpectedStats([]));
|
|
||||||
done();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should get accurately reported data for given id from getAllStats',
|
|
||||||
done => {
|
|
||||||
statsModel.reportNewRequest(id, 9);
|
|
||||||
statsModel.reportNewRequest(id2, 2);
|
|
||||||
statsModel.reportNewRequest(id3, 3);
|
|
||||||
statsModel.report500(id);
|
|
||||||
|
|
||||||
async.series([
|
|
||||||
next => {
|
|
||||||
statsModel.getAllStats(fakeLogger, [id], (err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
|
|
||||||
assert.equal(res.requests[0], 9);
|
|
||||||
assert.equal(res['500s'][0], 1);
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
next => {
|
|
||||||
statsModel.getAllStats(fakeLogger, [id, id2, id3],
|
|
||||||
(err, res) => {
|
|
||||||
assert.ifError(err);
|
|
||||||
|
|
||||||
assert.equal(res.requests[0], 14);
|
|
||||||
assert.deepStrictEqual(res.requests,
|
|
||||||
setExpectedStats([14]));
|
|
||||||
next();
|
|
||||||
});
|
|
||||||
},
|
|
||||||
], done);
|
|
||||||
});
|
|
||||||
});
|
|
|
@ -269,33 +269,4 @@ describe('v4 headerAuthCheck', () => {
|
||||||
assert.strictEqual(res.params.version, 4);
|
assert.strictEqual(res.params.version, 4);
|
||||||
done();
|
done();
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should not return error if proxy_path header is added', done => {
|
|
||||||
// Freezes time so date created within function will be Feb 8, 2016
|
|
||||||
const clock = lolex.install(1454962445000);
|
|
||||||
/* eslint-disable camelcase */
|
|
||||||
const alteredRequest = createAlteredRequest({
|
|
||||||
proxy_path: 'proxy/1234' }, 'headers', request, headers);
|
|
||||||
/* eslint-enable camelcase */
|
|
||||||
const res = headerAuthCheck(alteredRequest, log);
|
|
||||||
clock.uninstall();
|
|
||||||
assert.strictEqual(res.err, null);
|
|
||||||
done();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return InvalidRequest error if proxy_path header is invalid',
|
|
||||||
done => {
|
|
||||||
// Freezes time so date created within function will be Feb 8, 2016
|
|
||||||
const clock = lolex.install(1454962445000);
|
|
||||||
/* eslint-disable camelcase */
|
|
||||||
const alteredRequest = createAlteredRequest({
|
|
||||||
proxy_path: 'absc%2proxy/1234' }, 'headers', request, headers);
|
|
||||||
/* eslint-enable camelcase */
|
|
||||||
const res = headerAuthCheck(alteredRequest, log);
|
|
||||||
clock.uninstall();
|
|
||||||
assert.deepStrictEqual(res.err,
|
|
||||||
errors.InvalidArgument.customizeDescription(
|
|
||||||
'invalid proxy_path header'));
|
|
||||||
done();
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
|
|
|
@ -225,34 +225,4 @@ describe('v4 queryAuthCheck', () => {
|
||||||
assert.strictEqual(res.params.version, 4);
|
assert.strictEqual(res.params.version, 4);
|
||||||
done();
|
done();
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should return no error if the proxy_path header is added',
|
|
||||||
done => {
|
|
||||||
// Freezes time so date created within function will be Feb 8, 2016
|
|
||||||
const clock = lolex.install(1454974984001);
|
|
||||||
/* eslint-disable camelcase */
|
|
||||||
const alteredRequest = createAlteredRequest({ proxy_path:
|
|
||||||
'proxy/1234' }, 'headers', request, query);
|
|
||||||
/* eslint-enable camelcase */
|
|
||||||
const res = queryAuthCheck(alteredRequest, log, alteredRequest.query);
|
|
||||||
clock.uninstall();
|
|
||||||
assert.deepStrictEqual(res.err, null);
|
|
||||||
done();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return InvalidRequest error if proxy_path header is invalid',
|
|
||||||
done => {
|
|
||||||
// Freezes time so date created within function will be Feb 8, 2016
|
|
||||||
const clock = lolex.install(1454974984001);
|
|
||||||
/* eslint-disable camelcase */
|
|
||||||
const alteredRequest = createAlteredRequest({ proxy_path:
|
|
||||||
'absc%2proxy/1234' }, 'headers', request, query);
|
|
||||||
/* eslint-enable camelcase */
|
|
||||||
const res = queryAuthCheck(alteredRequest, log, alteredRequest.query);
|
|
||||||
clock.uninstall();
|
|
||||||
assert.deepStrictEqual(res.err,
|
|
||||||
errors.InvalidArgument.customizeDescription(
|
|
||||||
'invalid proxy_path header'));
|
|
||||||
done();
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
|
|
|
@ -59,7 +59,6 @@ const testWebsiteConfiguration = new WebsiteConfiguration({
|
||||||
});
|
});
|
||||||
|
|
||||||
const testLocationConstraint = 'us-west-1';
|
const testLocationConstraint = 'us-west-1';
|
||||||
const testReadLocationConstraint = 'us-west-2';
|
|
||||||
|
|
||||||
const testCorsConfiguration = [
|
const testCorsConfiguration = [
|
||||||
{ id: 'test',
|
{ id: 'test',
|
||||||
|
@ -116,8 +115,6 @@ const testLifecycleConfiguration = {
|
||||||
},
|
},
|
||||||
],
|
],
|
||||||
};
|
};
|
||||||
|
|
||||||
const testUid = '99ae3446-7082-4c17-ac97-52965dc004ec';
|
|
||||||
// create a dummy bucket to test getters and setters
|
// create a dummy bucket to test getters and setters
|
||||||
|
|
||||||
Object.keys(acl).forEach(
|
Object.keys(acl).forEach(
|
||||||
|
@ -135,8 +132,7 @@ Object.keys(acl).forEach(
|
||||||
testWebsiteConfiguration,
|
testWebsiteConfiguration,
|
||||||
testCorsConfiguration,
|
testCorsConfiguration,
|
||||||
testReplicationConfiguration,
|
testReplicationConfiguration,
|
||||||
testLifecycleConfiguration,
|
testLifecycleConfiguration);
|
||||||
testUid, undefined, true);
|
|
||||||
|
|
||||||
describe('serialize/deSerialize on BucketInfo class', () => {
|
describe('serialize/deSerialize on BucketInfo class', () => {
|
||||||
const serialized = dummyBucket.serialize();
|
const serialized = dummyBucket.serialize();
|
||||||
|
@ -155,7 +151,6 @@ Object.keys(acl).forEach(
|
||||||
versioningConfiguration:
|
versioningConfiguration:
|
||||||
dummyBucket._versioningConfiguration,
|
dummyBucket._versioningConfiguration,
|
||||||
locationConstraint: dummyBucket._locationConstraint,
|
locationConstraint: dummyBucket._locationConstraint,
|
||||||
readLocationConstraint: dummyBucket._readLocationConstraint,
|
|
||||||
websiteConfiguration: dummyBucket._websiteConfiguration
|
websiteConfiguration: dummyBucket._websiteConfiguration
|
||||||
.getConfig(),
|
.getConfig(),
|
||||||
cors: dummyBucket._cors,
|
cors: dummyBucket._cors,
|
||||||
|
@ -163,8 +158,6 @@ Object.keys(acl).forEach(
|
||||||
dummyBucket._replicationConfiguration,
|
dummyBucket._replicationConfiguration,
|
||||||
lifecycleConfiguration:
|
lifecycleConfiguration:
|
||||||
dummyBucket._lifecycleConfiguration,
|
dummyBucket._lifecycleConfiguration,
|
||||||
uid: dummyBucket._uid,
|
|
||||||
isNFS: dummyBucket._isNFS,
|
|
||||||
};
|
};
|
||||||
assert.strictEqual(serialized, JSON.stringify(bucketInfos));
|
assert.strictEqual(serialized, JSON.stringify(bucketInfos));
|
||||||
done();
|
done();
|
||||||
|
@ -189,7 +182,6 @@ Object.keys(acl).forEach(
|
||||||
'string');
|
'string');
|
||||||
assert.strictEqual(typeof dummyBucket.getCreationDate(),
|
assert.strictEqual(typeof dummyBucket.getCreationDate(),
|
||||||
'string');
|
'string');
|
||||||
assert.strictEqual(typeof dummyBucket.getUid(), 'string');
|
|
||||||
});
|
});
|
||||||
it('this should have the right acl\'s types', () => {
|
it('this should have the right acl\'s types', () => {
|
||||||
assert.strictEqual(typeof dummyBucket.getAcl(), 'object');
|
assert.strictEqual(typeof dummyBucket.getAcl(), 'object');
|
||||||
|
@ -257,18 +249,6 @@ Object.keys(acl).forEach(
|
||||||
assert.deepStrictEqual(dummyBucket.getLocationConstraint(),
|
assert.deepStrictEqual(dummyBucket.getLocationConstraint(),
|
||||||
testLocationConstraint);
|
testLocationConstraint);
|
||||||
});
|
});
|
||||||
it('getReadLocationConstraint should return locationConstraint ' +
|
|
||||||
'if readLocationConstraint hasn\'t been set', () => {
|
|
||||||
assert.deepStrictEqual(dummyBucket.getReadLocationConstraint(),
|
|
||||||
testLocationConstraint);
|
|
||||||
});
|
|
||||||
it('getReadLocationConstraint should return readLocationConstraint',
|
|
||||||
() => {
|
|
||||||
dummyBucket._readLocationConstraint =
|
|
||||||
testReadLocationConstraint;
|
|
||||||
assert.deepStrictEqual(dummyBucket.getReadLocationConstraint(),
|
|
||||||
testReadLocationConstraint);
|
|
||||||
});
|
|
||||||
it('getCors should return CORS configuration', () => {
|
it('getCors should return CORS configuration', () => {
|
||||||
assert.deepStrictEqual(dummyBucket.getCors(),
|
assert.deepStrictEqual(dummyBucket.getCors(),
|
||||||
testCorsConfiguration);
|
testCorsConfiguration);
|
||||||
|
@ -277,16 +257,6 @@ Object.keys(acl).forEach(
|
||||||
assert.deepStrictEqual(dummyBucket.getLifecycleConfiguration(),
|
assert.deepStrictEqual(dummyBucket.getLifecycleConfiguration(),
|
||||||
testLifecycleConfiguration);
|
testLifecycleConfiguration);
|
||||||
});
|
});
|
||||||
it('getUid should return unique id of bucket', () => {
|
|
||||||
assert.deepStrictEqual(dummyBucket.getUid(), testUid);
|
|
||||||
});
|
|
||||||
it('isNFS should return whether bucket is on NFS', () => {
|
|
||||||
assert.deepStrictEqual(dummyBucket.isNFS(), true);
|
|
||||||
});
|
|
||||||
it('setIsNFS should set whether bucket is on NFS', () => {
|
|
||||||
dummyBucket.setIsNFS(false);
|
|
||||||
assert.deepStrictEqual(dummyBucket.isNFS(), false);
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
|
|
||||||
describe('setters on BucketInfo class', () => {
|
describe('setters on BucketInfo class', () => {
|
||||||
|
@ -358,7 +328,8 @@ Object.keys(acl).forEach(
|
||||||
protocol: 'https',
|
protocol: 'https',
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
dummyBucket.setWebsiteConfiguration(newWebsiteConfiguration);
|
dummyBucket
|
||||||
|
.setWebsiteConfiguration(newWebsiteConfiguration);
|
||||||
assert.deepStrictEqual(dummyBucket.getWebsiteConfiguration(),
|
assert.deepStrictEqual(dummyBucket.getWebsiteConfiguration(),
|
||||||
newWebsiteConfiguration);
|
newWebsiteConfiguration);
|
||||||
});
|
});
|
||||||
|
@ -410,26 +381,3 @@ Object.keys(acl).forEach(
|
||||||
});
|
});
|
||||||
})
|
})
|
||||||
);
|
);
|
||||||
|
|
||||||
describe('uid default', () => {
|
|
||||||
it('should set uid if none is specified by constructor params', () => {
|
|
||||||
const dummyBucket = new BucketInfo(
|
|
||||||
bucketName, owner, ownerDisplayName, testDate,
|
|
||||||
BucketInfo.currentModelVersion(), acl[emptyAcl],
|
|
||||||
false, false, {
|
|
||||||
cryptoScheme: 1,
|
|
||||||
algorithm: 'sha1',
|
|
||||||
masterKeyId: 'somekey',
|
|
||||||
mandatory: true,
|
|
||||||
}, testVersioningConfiguration,
|
|
||||||
testLocationConstraint,
|
|
||||||
testWebsiteConfiguration,
|
|
||||||
testCorsConfiguration,
|
|
||||||
testReplicationConfiguration,
|
|
||||||
testLifecycleConfiguration);
|
|
||||||
|
|
||||||
const defaultUid = dummyBucket.getUid();
|
|
||||||
assert(defaultUid);
|
|
||||||
assert.strictEqual(defaultUid.length, 36);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
|
@ -175,10 +175,6 @@ function generateFilter(errorTag, tagObj) {
|
||||||
middleTags = '<Prefix>foo</Prefix><Prefix>bar</Prefix>' +
|
middleTags = '<Prefix>foo</Prefix><Prefix>bar</Prefix>' +
|
||||||
`<Prefix>${tagObj.lastPrefix}</Prefix>`;
|
`<Prefix>${tagObj.lastPrefix}</Prefix>`;
|
||||||
}
|
}
|
||||||
if (tagObj.label === 'mult-tags') {
|
|
||||||
middleTags = '<And><Tag><Key>color</Key><Value>blue</Value></Tag>' +
|
|
||||||
'<Tag><Key>shape</Key><Value>circle</Value></Tag></And>';
|
|
||||||
}
|
|
||||||
Filter = `<Filter>${middleTags}</Filter>`;
|
Filter = `<Filter>${middleTags}</Filter>`;
|
||||||
if (tagObj.label === 'also-prefix') {
|
if (tagObj.label === 'also-prefix') {
|
||||||
Filter = '<Filter></Filter><Prefix></Prefix>';
|
Filter = '<Filter></Filter><Prefix></Prefix>';
|
||||||
|
@ -353,16 +349,4 @@ describe('LifecycleConfiguration class getLifecycleConfiguration', () => {
|
||||||
done();
|
done();
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
||||||
it('should apply all unique Key tags if multiple tags are included', done => {
|
|
||||||
tagObj.label = 'mult-tags';
|
|
||||||
generateParsedXml('Filter', tagObj, parsedXml => {
|
|
||||||
const lcConfig = new LifecycleConfiguration(parsedXml).
|
|
||||||
getLifecycleConfiguration();
|
|
||||||
const expected = [{ key: 'color', val: 'blue' },
|
|
||||||
{ key: 'shape', val: 'circle' }];
|
|
||||||
assert.deepStrictEqual(expected, lcConfig.rules[0].filter.tags);
|
|
||||||
done();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
});
|
||||||
|
|
|
@ -1,74 +0,0 @@
const assert = require('assert');
const { parseString } = require('xml2js');

const werelogs = require('werelogs');

const ReplicationConfiguration =
    require('../../../lib/models/ReplicationConfiguration');

const logger = new werelogs.Logger('test:ReplicationConfiguration');

const mockedConfig = {
    replicationEndpoints: [{
        type: 'scality',
        site: 'ring',
        default: true,
    }, {
        type: 'aws_s3',
        site: 'awsbackend',
    }, {
        type: 'gcp',
        site: 'gcpbackend',
    }, {
        type: 'azure',
        site: 'azurebackend',
    }],
};


function getXMLConfig(hasPreferredRead) {
    return `
<ReplicationConfiguration>
    <Role>arn:aws:iam::root:role/s3-replication-role</Role>
    <Rule>
        <ID>Replication-Rule-1</ID>
        <Status>Enabled</Status>
        <Prefix>someprefix/</Prefix>
        <Destination>
            <Bucket>arn:aws:s3:::destbucket</Bucket>
            <StorageClass>awsbackend,` +
    `gcpbackend${hasPreferredRead ? ':preferred_read' : ''},azurebackend` +
    `</StorageClass>
        </Destination>
    </Rule>
</ReplicationConfiguration>
`;
}

describe('ReplicationConfiguration class', () => {
    it('should parse replication config XML without preferred read', done => {
        const repConfigXML = getXMLConfig(false);
        parseString(repConfigXML, (err, parsedXml) => {
            assert.ifError(err);
            const repConf = new ReplicationConfiguration(
                parsedXml, logger, mockedConfig);
            const repConfErr = repConf.parseConfiguration();
            assert.ifError(repConfErr);
            assert.strictEqual(repConf.getPreferredReadLocation(), null);
            done();
        });
    });
    it('should parse replication config XML with preferred read', done => {
        const repConfigXML = getXMLConfig(true);
        parseString(repConfigXML, (err, parsedXml) => {
            assert.ifError(err);
            const repConf = new ReplicationConfiguration(
                parsedXml, logger, mockedConfig);
            const repConfErr = repConf.parseConfiguration();
            assert.ifError(repConfErr);
            assert.strictEqual(repConf.getPreferredReadLocation(),
                'gcpbackend');
            done();
        });
    });
});

@ -82,7 +82,6 @@ describe('ObjectMD class setters/getters', () => {
            role: '',
            storageType: '',
            dataStoreVersionId: '',
            isNFS: null,
        }],
        ['ReplicationInfo', {
            status: 'PENDING',

@ -98,11 +97,8 @@ describe('ObjectMD class setters/getters', () => {
                'arn:aws:iam::account-id:role/dest-resource',
            storageType: 'aws_s3',
            dataStoreVersionId: '',
            isNFS: null,
        }],
        ['DataStoreName', null, ''],
        ['ReplicationIsNFS', null, null],
        ['ReplicationIsNFS', true],
    ].forEach(test => {
        const property = test[0];
        const testValue = test[1];

@ -140,24 +136,6 @@ describe('ObjectMD class setters/getters', () => {
        }]);
    });

    it('ObjectMD::setReplicationBackends', () => {
        md.setReplicationBackends([{
            site: 'a',
            status: 'b',
            dataStoreVersionId: 'c',
        }]);
        assert.deepStrictEqual(md.getReplicationBackends(), [{
            site: 'a',
            status: 'b',
            dataStoreVersionId: 'c',
        }]);
    });

    it('ObjectMD::setReplicationStorageClass', () => {
        md.setReplicationStorageClass('a');
        assert.strictEqual(md.getReplicationStorageClass(), 'a');
    });

    it('ObjectMD::getReplicationSiteStatus', () => {
        md.setReplicationInfo({
            backends: [{

@ -1,170 +0,0 @@
const assert = require('assert');
const HealthProbeServer =
    require('../../../../lib/network/probe/HealthProbeServer');
const http = require('http');

function makeRequest(meth, uri) {
    const params = {
        hostname: 'localhost',
        port: 4042,
        method: meth,
        path: uri,
    };
    const req = http.request(params);
    req.setNoDelay(true);
    return req;
}

const endpoints = [
    '/_/health/liveness',
    '/_/health/readiness',
];

const badEndpoints = [
    '/_/health/liveness_thisiswrong',
    '/_/health/readiness_thisiswrong',
];

describe('network.probe.HealthProbeServer', () => {
    describe('service is "up"', () => {
        let server;
        function setup(done) {
            server = new HealthProbeServer({ port: 4042 });
            server.start();
            done();
        }

        before(done => {
            setup(done);
        });

        after(done => {
            server.stop();
            done();
        });
        endpoints.forEach(ep => {
            it('should perform a GET and ' +
            'return 200 OK', done => {
                makeRequest('GET', ep)
                .on('response', res => {
                    assert(res.statusCode === 200);
                    done();
                })
                .on('error', err => {
                    assert.ifError(err);
                    done();
                }).end();
            });
        });
    });

    describe('service is "down"', () => {
        let server;
        function setup(done) {
            function falseStub() {
                return false;
            }
            server = new HealthProbeServer({
                port: 4042,
                livenessCheck: falseStub,
                readinessCheck: falseStub,
            });
            server.start();
            done();
        }

        before(done => {
            setup(done);
        });

        after(done => {
            server.stop();
            done();
        });

        endpoints.forEach(ep => {
            it('should perform a GET and ' +
            'return 503 ServiceUnavailable', done => {
                makeRequest('GET', ep)
                .on('response', res => {
                    assert(res.statusCode === 503);
                    done();
                })
                .on('error', err => {
                    assert.ifError(err);
                    done();
                }).end();
            });
        });
    });

    describe('Invalid Methods', () => {
        let server;
        function setup(done) {
            server = new HealthProbeServer({
                port: 4042,
            });
            server.start();
            done();
        }

        before(done => {
            setup(done);
        });

        after(done => {
            server.stop();
            done();
        });

        endpoints.forEach(ep => {
            it('should perform a POST and ' +
            'return 405 MethodNotAllowed', done => {
                makeRequest('POST', ep)
                .on('response', res => {
                    assert(res.statusCode === 405);
                    done();
                })
                .on('error', err => {
                    assert.ifError(err);
                    done();
                }).end();
            });
        });
    });

    describe('Invalid URI', () => {
        let server;
        function setup(done) {
            server = new HealthProbeServer({
                port: 4042,
            });
            server.start();
            done();
        }

        before(done => {
            setup(done);
        });

        after(done => {
            server.stop();
            done();
        });

        badEndpoints.forEach(ep => {
            it('should perform a GET and ' +
            'return 400 InvalidURI', done => {
                makeRequest('GET', ep)
                .on('response', res => {
                    assert(res.statusCode === 400);
                    done();
                })
                .on('error', err => {
                    assert.ifError(err);
                    done();
                }).end();
            });
        });
    });
});

@ -7,7 +7,6 @@ const {
    _checkEtagNoneMatch,
    _checkModifiedSince,
    _checkUnmodifiedSince,
    checkDateModifiedHeaders,
    validateConditionalHeaders,
} = require('../../../lib/s3middleware/validateConditionalHeaders');

@ -173,59 +172,6 @@ describe('validateConditionalHeaders util function ::', () => {
        });
    });

describe('checkDateModifiedHeaders util function: ', () => {
    const expectedSuccess = {
        present: true,
        error: null,
    };

    const expectedAbsense = {
        present: false,
        error: null,
    };

    it('should return NotModified error for \'if-modified-since\' header',
    () => {
        const header = {};
        header['if-modified-since'] = afterLastModified;
        const { modifiedSinceRes, unmodifiedSinceRes } =
            checkDateModifiedHeaders(header, lastModified);
        assert.deepStrictEqual(modifiedSinceRes.error, errors.NotModified);
        assert.deepStrictEqual(unmodifiedSinceRes, expectedAbsense);
    });

    it('should return PreconditionFailed error for \'if-unmodified-since\' ' +
    'header', () => {
        const header = {};
        header['if-unmodified-since'] = beforeLastModified;
        const { modifiedSinceRes, unmodifiedSinceRes } =
            checkDateModifiedHeaders(header, lastModified);
        assert.deepStrictEqual(unmodifiedSinceRes.error,
            errors.PreconditionFailed);
        assert.deepStrictEqual(modifiedSinceRes, expectedAbsense);
    });

    it('should succeed if \'if-modified-since\' header value is earlier ' +
    'than last modified', () => {
        const header = {};
        header['if-modified-since'] = beforeLastModified;
        const { modifiedSinceRes, unmodifiedSinceRes } =
            checkDateModifiedHeaders(header, lastModified);
        assert.deepStrictEqual(modifiedSinceRes, expectedSuccess);
        assert.deepStrictEqual(unmodifiedSinceRes, expectedAbsense);
    });

    it('should succeed if \'if-unmodified-since\' header value is later ' +
    'than last modified', () => {
        const header = {};
        header['if-unmodified-since'] = afterLastModified;
        const { modifiedSinceRes, unmodifiedSinceRes } =
            checkDateModifiedHeaders(header, lastModified);
        assert.deepStrictEqual(unmodifiedSinceRes, expectedSuccess);
        assert.deepStrictEqual(modifiedSinceRes, expectedAbsense);
    });
});

describe('_checkEtagMatch function :', () => {
    const expectedSuccess = {
        present: true,

@ -1,36 +0,0 @@
const assert = require('assert');
const routesUtils = require('../../../../lib/s3routes/routesUtils.js');

const bannedStr = 'banned';
const prefixBlacklist = [];

// byte size of 915
const keyutf8 = '%EA%9D%8B崰㈌㒈保轖䳷䀰⺩ቆ楪秲ⴝ㿅鼎ꓜ퇬枅염곞召㸾⌙ꪊᆐ庍뉆䌗幐鸆䛃➟녩' +
    'ˍ뙪臅⠙≼绒벊냂詴 끴鹲萯⇂㭢䈊퉉楝舳㷖족痴䧫㾵ำꎆ꼵껪멷誕㳓腜쒃컹㑻鳃삚舿췈孨੦⮀NJ곓꺼꜈' +
    '嗼뫘悕錸瑺륒㜓垻ㆩꝿ詀펉ᆙ舑䜾힑藪碙ꀎꂰ췊Ᏻ 㘺幽醛잯ද汧Ꟑꛒⶨ쪸숞헹㭔ꡔᘼ뺓ᡆᑟ䅅퀭耓弧⢠⇙' +
    '폪ް蛧⃪Ἔ돫ꕢ븥ヲ캂䝄쟐颺ᓾ둾Ұ껗礞ᾰ瘹蒯硳풛瞋襎奺熝妒컚쉴⿂㽝㝳駵鈚䄖戭䌸ᇁ䙪鸮ᐴ稫ⶭ뀟ھ⦿' +
    '䴳稉ꉕ捈袿놾띐✯伤䃫⸧ꠏ瘌틳藔ˋ㫣敀䔩㭘식↴⧵佶痊牌ꪌ搒꾛æᤈべ쉴挜敗羥誜嘳ֶꫜ걵ࣀ묟ኋ拃秷膤䨸菥' +
    '䟆곘縧멀煣卲챸⧃⏶혣ਔ뙞밺㊑ک씌촃Ȅ頰ᖅ懚ホῐ꠷㯢먈㝹밷㮇䘖桲阥黾噘烻ᓧ鈠ᴥ徰穆ꘛ蹕綻表鯍裊' +
    '鮕漨踒ꠍ픸Ä☶莒浏钸목탬툖氭ˠٸ൪㤌ᶟ訧ᜒೳ揪Ⴛ摖㸣᳑걀ꢢ䏹ῖ"';

describe('routesUtils.isValidObjectKey', () => {
    it('should return isValid false if object key name starts with a ' +
    'blacklisted prefix', () => {
        const result = routesUtils.isValidObjectKey('bannedkey', [bannedStr]);
        // return { isValid: false, invalidPrefix };
        assert.strictEqual(result.isValid, false);
        assert.strictEqual(result.invalidPrefix, bannedStr);
    });

    it('should return isValid false if object key name exceeds length of 915',
    () => {
        const key = 'a'.repeat(916);
        const result = routesUtils.isValidObjectKey(key, prefixBlacklist);
        assert.strictEqual(result.isValid, false);
    });

    it('should return isValid true for a utf8 string of byte size 915', () => {
        const result = routesUtils.isValidObjectKey(keyutf8, prefixBlacklist);
        assert.strictEqual(result.isValid, true);
    });
});

@ -1,76 +0,0 @@
'use strict'; //eslint-disable-line

const assert = require('assert');

const { markerFilterMPU } =
    require('../../../../../lib/storage/metadata/in_memory/bucket_utilities');

function dupeArray(arr) {
    const dupe = [];

    arr.forEach(i => {
        dupe.push(Object.assign({}, i));
    });

    return dupe;
}

describe('bucket utility methods for in_memory backend', () => {
    it('should return an array of multipart uploads starting with the item ' +
    'right after the specified keyMarker and uploadIdMarker', () => {
        const mpus = [
            {
                key: 'key-1',
                uploadId: '2624ca6080c841d48a2481941df868a9',
            },
            {
                key: 'key-1',
                uploadId: '4ffeca96b0c24ea9b538b8f0b60cede3',
            },
            {
                key: 'key-1',
                uploadId: '52e5b94474894990a2b94330bb3c8fa9',
            },
            {
                key: 'key-1',
                uploadId: '54e530c5d4c741898c8e161d426591cb',
            },
            {
                key: 'key-1',
                uploadId: '6cc59f9d29254e81ab6cb6332fb46314',
            },
            {
                key: 'key-1',
                uploadId: 'fe9dd10776c9476697632d0b55960a05',
            },
            {
                key: 'key-2',
                uploadId: '68e24ccb96c14beea79bf01fc130fdf5',
            },
        ];

        [
            {
                keyMarker: 'key-1',
                uploadIdMarker: '54e530c5d4c741898c8e161d426591cb',
                expected: 3,
            },
            {
                keyMarker: 'key-2',
                uploadIdMarker: '68e24ccb96c14beea79bf01fc130fdf5',
                expected: 0,
            },
            {
                keyMarker: 'key-1',
                uploadIdMarker: '2624ca6080c841d48a2481941df868a9',
                expected: 6,
            },
        ].forEach(item => {
            const res = markerFilterMPU(item, dupeArray(mpus));
            assert.equal(res.length, item.expected);

            const expected = mpus.slice(mpus.length - res.length);
            assert.deepStrictEqual(res, expected);
        });
    });
});

@ -1,502 +0,0 @@
|
||||||
const assert = require('assert');
|
|
||||||
|
|
||||||
const {
|
|
||||||
NEW_OBJ,
|
|
||||||
NEW_VER,
|
|
||||||
UPDATE_VER,
|
|
||||||
UPDATE_MST,
|
|
||||||
RESTORE,
|
|
||||||
DEL_VER,
|
|
||||||
DEL_MST,
|
|
||||||
DataCounter,
|
|
||||||
} = require('../../../../../lib/storage/metadata/mongoclient/DataCounter');
|
|
||||||
|
|
||||||
const refZeroObj = {
|
|
||||||
objects: 0,
|
|
||||||
versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 0, prev: 0 },
|
|
||||||
byLocation: {},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
const refSingleObj = {
|
|
||||||
objects: 2,
|
|
||||||
versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 200, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 200, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
const refSingleObjVer = {
|
|
||||||
objects: 1,
|
|
||||||
versions: 1,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 100, prev: 100 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 100 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
const refMultiObjVer = {
|
|
||||||
objects: 1,
|
|
||||||
versions: 1,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 200, prev: 200 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 100 },
|
|
||||||
locationTwo: { curr: 100, prev: 100 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
const refMultiObj = {
|
|
||||||
objects: 2,
|
|
||||||
versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 400, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 200, prev: 0 },
|
|
||||||
locationTwo: { curr: 200, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
const singleSite = size => ({
|
|
||||||
'content-length': size,
|
|
||||||
'dataStoreName': 'locationOne',
|
|
||||||
'replicationInfo': {
|
|
||||||
backends: [],
|
|
||||||
},
|
|
||||||
});
|
|
||||||
|
|
||||||
const multiSite = (size, isComplete) => ({
|
|
||||||
'content-length': size,
|
|
||||||
'dataStoreName': 'locationOne',
|
|
||||||
'replicationInfo': {
|
|
||||||
backends: [{
|
|
||||||
site: 'locationTwo',
|
|
||||||
status: isComplete ? 'COMPLETED' : 'PENDING',
|
|
||||||
}],
|
|
||||||
},
|
|
||||||
});
|
|
||||||
|
|
||||||
const transientSite = (size, status, backends) => ({
|
|
||||||
'content-length': size,
|
|
||||||
'dataStoreName': 'locationOne',
|
|
||||||
'replicationInfo': { status, backends },
|
|
||||||
});
|
|
||||||
|
|
||||||
const locationConstraints = {
|
|
||||||
locationOne: { isTransient: true },
|
|
||||||
locationTwo: { isTransient: false },
|
|
||||||
};
|
|
||||||
|
|
||||||
const dataCounter = new DataCounter();
|
|
||||||
|
|
||||||
describe('DataCounter Class', () => {
|
|
||||||
it('should create a zero object', () => {
|
|
||||||
dataCounter.set(refZeroObj);
|
|
||||||
assert.deepStrictEqual(dataCounter.results(), refZeroObj);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should skip dataCounter methods if initial values are not set', () => {
|
|
||||||
const testCounter = new DataCounter();
|
|
||||||
testCounter.addObject(singleSite(100), null, NEW_OBJ);
|
|
||||||
assert.deepStrictEqual(testCounter.results(), refZeroObj);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('DataCounter::updateTransientList', () => {
|
|
||||||
afterEach(() => dataCounter.updateTransientList({}));
|
|
||||||
it('should set transient list', () => {
|
|
||||||
assert.deepStrictEqual(dataCounter.transientList, {});
|
|
||||||
dataCounter.updateTransientList(locationConstraints);
|
|
||||||
const expectedRes = { locationOne: true, locationTwo: false };
|
|
||||||
assert.deepStrictEqual(dataCounter.transientList, expectedRes);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('DataCounter::addObject', () => {
|
|
||||||
const tests = [
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, new object one site',
|
|
||||||
init: refZeroObj,
|
|
||||||
input: [singleSite(100), null, NEW_OBJ],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 100, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, new object multi site',
|
|
||||||
init: refZeroObj,
|
|
||||||
input: [multiSite(100, true), null, NEW_OBJ],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 200, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 0 },
|
|
||||||
locationTwo: { curr: 100, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, overwrite single site',
|
|
||||||
init: refSingleObj,
|
|
||||||
input: [singleSite(100), singleSite(50), NEW_OBJ],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 2, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 250, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 250, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, overwrite multi site',
|
|
||||||
init: refMultiObj,
|
|
||||||
input: [multiSite(100, true), multiSite(50, true), NEW_OBJ],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 2, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 500, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 250, prev: 0 },
|
|
||||||
locationTwo: { curr: 250, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, new version single site',
|
|
||||||
init: refSingleObj,
|
|
||||||
input: [singleSite(100), singleSite(50), NEW_VER],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 2, versions: 1,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 250, prev: 50 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 250, prev: 50 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, new version multi site',
|
|
||||||
init: refMultiObj,
|
|
||||||
input: [multiSite(100, true), multiSite(50, true), NEW_VER],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 2, versions: 1,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 500, prev: 100 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 250, prev: 50 },
|
|
||||||
locationTwo: { curr: 250, prev: 50 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly ignore pending status, multi site',
|
|
||||||
init: refZeroObj,
|
|
||||||
input: [multiSite(100, false), null, NEW_OBJ],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 100, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, ' +
|
|
||||||
'replication completion update in master object',
|
|
||||||
init: refSingleObj,
|
|
||||||
input: [multiSite(100, true), multiSite(100, false), UPDATE_MST],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 2, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 300, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 200, prev: 0 },
|
|
||||||
locationTwo: { curr: 100, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, ' +
|
|
||||||
'replication completion update in versioned object',
|
|
||||||
init: refSingleObjVer,
|
|
||||||
input: [multiSite(100, true), multiSite(100, false), UPDATE_VER],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 1,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 100, prev: 200 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 100 },
|
|
||||||
locationTwo: { curr: 0, prev: 100 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, ' +
|
|
||||||
'restoring versioned object as master',
|
|
||||||
init: refMultiObjVer,
|
|
||||||
input: [multiSite(100, true), multiSite(100, true), RESTORE],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 2, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 400, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 200, prev: 0 },
|
|
||||||
locationTwo: { curr: 200, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
];
|
|
||||||
tests.forEach(test => it(test.it, () => {
|
|
||||||
const { expectedRes, input, init } = test;
|
|
||||||
dataCounter.set(init);
|
|
||||||
dataCounter.addObject(...input);
|
|
||||||
const testResults = dataCounter.results();
|
|
||||||
Object.keys(expectedRes).forEach(key => {
|
|
||||||
if (typeof expectedRes[key] === 'object') {
|
|
||||||
assert.deepStrictEqual(testResults[key], expectedRes[key]);
|
|
||||||
} else {
|
|
||||||
assert.strictEqual(testResults[key], expectedRes[key]);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}));
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('DataCounter, update with transient location', () => {
|
|
||||||
before(() => dataCounter.updateTransientList(locationConstraints));
|
|
||||||
after(() => dataCounter.updateTransientList({}));
|
|
||||||
|
|
||||||
const pCurrMD = transientSite(100, 'PENDING', [
|
|
||||||
{ site: 'site1', status: 'PENDING' },
|
|
||||||
{ site: 'site2', status: 'COMPLETED' },
|
|
||||||
]);
|
|
||||||
const cCurrMD = transientSite(100, 'COMPLETED', [
|
|
||||||
{ site: 'site1', status: 'COMPLETED' },
|
|
||||||
{ site: 'site2', status: 'COMPLETED' },
|
|
||||||
]);
|
|
||||||
const prevMD = transientSite(100, 'PENDING', [
|
|
||||||
{ site: 'site1', status: 'PENDING' },
|
|
||||||
{ site: 'site2', status: 'PENDING' },
|
|
||||||
]);
|
|
||||||
const transientTest = [
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, ' +
|
|
||||||
'version object, replication status = PENDING',
|
|
||||||
init: refSingleObjVer,
|
|
||||||
input: [pCurrMD, prevMD, UPDATE_VER],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 1,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 100, prev: 200 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 100 },
|
|
||||||
site2: { curr: 0, prev: 100 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, ' +
|
|
||||||
'version object, replication status = COMPLETED',
|
|
||||||
init: refSingleObjVer,
|
|
||||||
input: [cCurrMD, prevMD, UPDATE_VER],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 1,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 100, prev: 200 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 0 },
|
|
||||||
site1: { curr: 0, prev: 100 },
|
|
||||||
site2: { curr: 0, prev: 100 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, ' +
|
|
||||||
'master object, replication status = PENDING',
|
|
||||||
init: refSingleObjVer,
|
|
||||||
input: [pCurrMD, prevMD, UPDATE_MST],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 1,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 200, prev: 100 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 100 },
|
|
||||||
site2: { curr: 100, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, ' +
|
|
||||||
'master object, replication status = COMPLETED',
|
|
||||||
init: refSingleObjVer,
|
|
||||||
input: [cCurrMD, prevMD, UPDATE_MST],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 1,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 200, prev: 100 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 0, prev: 100 },
|
|
||||||
site1: { curr: 100, prev: 0 },
|
|
||||||
site2: { curr: 100, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
];
|
|
||||||
|
|
||||||
transientTest.forEach(test => it(test.it, () => {
|
|
||||||
const { expectedRes, input, init } = test;
|
|
||||||
dataCounter.set(init);
|
|
||||||
dataCounter.addObject(...input);
|
|
||||||
const testResults = dataCounter.results();
|
|
||||||
Object.keys(expectedRes).forEach(key => {
|
|
||||||
if (typeof expectedRes[key] === 'object') {
|
|
||||||
assert.deepStrictEqual(testResults[key], expectedRes[key]);
|
|
||||||
} else {
|
|
||||||
assert.strictEqual(testResults[key], expectedRes[key]);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}));
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('DataCounter::delObject', () => {
|
|
||||||
const tests = [
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, ' +
|
|
||||||
'delete master object single site',
|
|
||||||
init: refMultiObj,
|
|
||||||
input: [singleSite(100), DEL_MST],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 300, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 0 },
|
|
||||||
locationTwo: { curr: 200, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, ' +
|
|
||||||
'delete master object multi site',
|
|
||||||
init: refMultiObj,
|
|
||||||
input: [multiSite(100, true), DEL_MST],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 200, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 0 },
|
|
||||||
locationTwo: { curr: 100, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, ' +
|
|
||||||
'delete versioned object single site',
|
|
||||||
init: refMultiObjVer,
|
|
||||||
input: [singleSite(100), DEL_VER],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 200, prev: 100 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 0 },
|
|
||||||
locationTwo: { curr: 100, prev: 100 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should correctly update DataCounter, ' +
|
|
||||||
'delete versioned object multi site',
|
|
||||||
init: refMultiObjVer,
|
|
||||||
input: [multiSite(100, true), DEL_VER],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 200, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 0 },
|
|
||||||
locationTwo: { curr: 100, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should clamp negative values to 0, master object',
|
|
||||||
init: refMultiObjVer,
|
|
||||||
input: [multiSite(300, true), DEL_MST],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 0, versions: 1,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 0, prev: 200 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 0, prev: 100 },
|
|
||||||
locationTwo: { curr: 0, prev: 100 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should clamp negative values to 0, versioned object',
|
|
||||||
init: refMultiObjVer,
|
|
||||||
input: [multiSite(300, true), DEL_VER],
|
|
||||||
expectedRes: {
|
|
||||||
objects: 1, versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 200, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
locationOne: { curr: 100, prev: 0 },
|
|
||||||
locationTwo: { curr: 100, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
];
|
|
||||||
|
|
||||||
tests.forEach(test => it(test.it, () => {
|
|
||||||
const { expectedRes, input, init } = test;
|
|
||||||
dataCounter.set(init);
|
|
||||||
dataCounter.delObject(...input);
|
|
||||||
const testResults = dataCounter.results();
|
|
||||||
Object.keys(expectedRes).forEach(key => {
|
|
||||||
if (typeof expectedRes[key] === 'object') {
|
|
||||||
assert.deepStrictEqual(testResults[key], expectedRes[key]);
|
|
||||||
} else {
|
|
||||||
assert.strictEqual(testResults[key], expectedRes[key]);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}));
|
|
||||||
});
|
|
|
@ -1,274 +0,0 @@
|
||||||
const assert = require('assert');
|
|
||||||
const { Timestamp } = require('bson');
|
|
||||||
|
|
||||||
const ListRecordStream = require(
|
|
||||||
'../../../../../lib/storage/metadata/mongoclient/ListRecordStream');
|
|
||||||
const DummyRequestLogger = require('./utils/DummyRequestLogger');
|
|
||||||
|
|
||||||
const logger = new DummyRequestLogger();
|
|
||||||
|
|
||||||
const mongoProcessedLogEntries = {
|
|
||||||
insert: {
|
|
||||||
h: -42,
|
|
||||||
ts: Timestamp.fromNumber(42),
|
|
||||||
op: 'i',
|
|
||||||
ns: 'metadata.replicated-bucket',
|
|
||||||
o: {
|
|
||||||
_id: 'replicated-key\u000098467518084696999999RG001 19.3',
|
|
||||||
value: {
|
|
||||||
someField: 'someValue',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
updateObject: {
|
|
||||||
h: -42,
|
|
||||||
ts: Timestamp.fromNumber(42),
|
|
||||||
op: 'u',
|
|
||||||
ns: 'metadata.replicated-bucket',
|
|
||||||
o2: {
|
|
||||||
_id: 'replicated-key\u000098467518084696999999RG001 19.3',
|
|
||||||
},
|
|
||||||
o: {
|
|
||||||
$set: {
|
|
||||||
value: {
|
|
||||||
someField: 'someUpdatedValue',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
deleteObject: {
|
|
||||||
h: -42,
|
|
||||||
ts: Timestamp.fromNumber(42),
|
|
||||||
op: 'd',
|
|
||||||
ns: 'metadata.replicated-bucket',
|
|
||||||
o: {
|
|
||||||
_id: 'replicated-key\u000098467518084696999999RG001 19.3',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
putBucketAttributes: {
|
|
||||||
h: -42,
|
|
||||||
ts: Timestamp.fromNumber(42),
|
|
||||||
op: 'u',
|
|
||||||
ns: 'metadata.__metastore',
|
|
||||||
o2: {
|
|
||||||
_id: 'new-bucket',
|
|
||||||
}, o: {
|
|
||||||
_id: 'new-bucket',
|
|
||||||
value: {
|
|
||||||
someField: 'someValue',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
deleteBucket: {
|
|
||||||
h: -42,
|
|
||||||
ts: Timestamp.fromNumber(42),
|
|
||||||
op: 'd',
|
|
||||||
ns: 'metadata.__metastore',
|
|
||||||
o: {
|
|
||||||
_id: 'new-bucket',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
const mongoIgnoredLogEntries = {
|
|
||||||
createBucket: {
|
|
||||||
h: -42,
|
|
||||||
ts: Timestamp.fromNumber(42),
|
|
||||||
op: 'c',
|
|
||||||
ns: 'metadata.$cmd',
|
|
||||||
o: {
|
|
||||||
create: 'new-bucket',
|
|
||||||
idIndex: {
|
|
||||||
v: 2,
|
|
||||||
key: { _id: 1 },
|
|
||||||
name: '_id_',
|
|
||||||
ns: 'metadata.new-bucket',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
dropBucketDb: {
|
|
||||||
h: -42,
|
|
||||||
ts: Timestamp.fromNumber(42),
|
|
||||||
op: 'c',
|
|
||||||
ns: 'metadata.$cmd',
|
|
||||||
o: {
|
|
||||||
drop: 'new-bucket',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
const expectedStreamEntries = {
|
|
||||||
insert: {
|
|
||||||
db: 'replicated-bucket',
|
|
||||||
entries: [
|
|
||||||
{
|
|
||||||
key: 'replicated-key\u000098467518084696999999RG001 19.3',
|
|
||||||
type: 'put',
|
|
||||||
value: '{"someField":"someValue"}',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
timestamp: new Date(42000),
|
|
||||||
},
|
|
||||||
updateObject: {
|
|
||||||
db: 'replicated-bucket',
|
|
||||||
entries: [
|
|
||||||
{
|
|
||||||
key: 'replicated-key\u000098467518084696999999RG001 19.3',
|
|
||||||
type: 'put',
|
|
||||||
value: '{"someField":"someUpdatedValue"}',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
timestamp: new Date(42000),
|
|
||||||
},
|
|
||||||
deleteObject: {
|
|
||||||
db: 'replicated-bucket',
|
|
||||||
entries: [
|
|
||||||
{
|
|
||||||
key: 'replicated-key\u000098467518084696999999RG001 19.3',
|
|
||||||
type: 'delete',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
timestamp: new Date(42000),
|
|
||||||
},
|
|
||||||
putBucketAttributes: {
|
|
||||||
db: '__metastore',
|
|
||||||
entries: [
|
|
||||||
{
|
|
||||||
key: 'new-bucket',
|
|
||||||
type: 'put',
|
|
||||||
value: '{"someField":"someValue"}',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
timestamp: new Date(42000),
|
|
||||||
},
|
|
||||||
deleteBucket: {
|
|
||||||
db: '__metastore',
|
|
||||||
entries: [
|
|
||||||
{
|
|
||||||
key: 'new-bucket',
|
|
||||||
type: 'delete',
|
|
||||||
},
|
|
||||||
],
|
|
||||||
timestamp: new Date(42000),
|
|
||||||
},
|
|
||||||
dropBucketDb: {
|
|
||||||
h: -42,
|
|
||||||
op: 'c',
|
|
||||||
ns: 'metadata.$cmd',
|
|
||||||
o: {
|
|
||||||
drop: 'new-bucket',
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
describe('mongoclient.ListRecordStream', () => {
|
|
||||||
const lastEndIDEntry = {
|
|
||||||
h: -43,
|
|
||||||
ts: Timestamp.fromNumber(42),
|
|
||||||
};
|
|
||||||
Object.keys(mongoProcessedLogEntries).forEach(entryType => {
|
|
||||||
it(`should transform ${entryType}`, done => {
|
|
||||||
const lrs = new ListRecordStream(logger,
|
|
||||||
lastEndIDEntry.h.toString());
|
|
||||||
let dataReceived = false;
|
|
||||||
lrs.on('info', info => {
|
|
||||||
assert(dataReceived);
|
|
||||||
const parsedInfo = info;
|
|
||||||
parsedInfo.end = JSON.parse(parsedInfo.end);
|
|
||||||
assert.deepStrictEqual(parsedInfo, {
|
|
||||||
end: { ts: 42, uniqID: '-42' },
|
|
||||||
});
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
lrs.on('data', entry => {
|
|
||||||
assert.deepStrictEqual(entry, expectedStreamEntries[entryType]);
|
|
||||||
dataReceived = true;
|
|
||||||
});
|
|
||||||
// first write will be ignored by ListRecordStream because
|
|
||||||
// of the last end ID (-42), it's needed though to bootstrap it
|
|
||||||
lrs.write(lastEndIDEntry);
|
|
||||||
lrs.write(mongoProcessedLogEntries[entryType]);
|
|
||||||
lrs.end();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
it('should ignore other entry types', done => {
|
|
||||||
const lrs = new ListRecordStream(logger, lastEndIDEntry.h.toString());
|
|
||||||
let infoEmitted = false;
|
|
||||||
lrs.on('info', info => {
|
|
||||||
const parsedInfo = info;
|
|
||||||
parsedInfo.end = JSON.parse(parsedInfo.end);
|
|
||||||
assert.deepStrictEqual(parsedInfo, {
|
|
||||||
end: { ts: 42, uniqID: '-42' },
|
|
||||||
});
|
|
||||||
infoEmitted = true;
|
|
||||||
});
|
|
||||||
lrs.on('data', entry => {
|
|
||||||
assert(false, `ListRecordStream did not ignore entry ${entry}`);
|
|
||||||
});
|
|
||||||
lrs.on('end', () => {
|
|
||||||
assert(infoEmitted);
|
|
||||||
done();
|
|
||||||
});
|
|
||||||
// first write will be ignored by ListRecordStream because
|
|
||||||
// of the last end ID (-43), it's needed though to bootstrap it
|
|
||||||
lrs.write(lastEndIDEntry);
|
|
||||||
Object.keys(mongoIgnoredLogEntries).forEach(entryType => {
|
|
||||||
lrs.write(mongoIgnoredLogEntries[entryType]);
|
|
||||||
});
|
|
||||||
lrs.end();
|
|
||||||
});
|
|
||||||
it('should emit info even if no entry consumed', done => {
|
|
||||||
const lrs = new ListRecordStream(logger, lastEndIDEntry.h.toString());
|
|
||||||
let infoEmitted = false;
|
|
||||||
lrs.on('info', info => {
|
|
||||||
const parsedInfo = info;
|
|
||||||
parsedInfo.end = JSON.parse(parsedInfo.end);
|
|
||||||
assert.deepStrictEqual(parsedInfo, {
|
|
||||||
end: { ts: 0, uniqID: null },
|
|
||||||
});
|
|
||||||
infoEmitted = true;
|
|
||||||
});
|
|
||||||
lrs.on('data', () => {
|
|
||||||
assert(false, 'did not expect data from ListRecordStream');
|
|
||||||
});
|
|
||||||
lrs.on('end', () => {
|
|
||||||
assert(infoEmitted);
|
|
||||||
done();
|
|
||||||
});
|
|
||||||
lrs.end();
|
|
||||||
});
|
|
||||||
it('should skip entries until uniqID is encountered', done => {
|
|
||||||
const logEntries = [
|
|
||||||
Object.assign({}, mongoProcessedLogEntries.insert,
|
|
||||||
{ h: 1234 }),
|
|
||||||
Object.assign({}, mongoProcessedLogEntries.insert,
|
|
||||||
{ h: 5678 }),
|
|
||||||
Object.assign({}, mongoProcessedLogEntries.insert,
|
|
||||||
{ h: -1234 }),
|
|
||||||
Object.assign({}, mongoProcessedLogEntries.insert,
|
|
||||||
{ h: 2345 }),
|
|
||||||
];
|
|
||||||
const lrs = new ListRecordStream(logger, '5678');
|
|
||||||
let nbReceivedEntries = 0;
|
|
||||||
let infoEmitted = false;
|
|
||||||
lrs.on('info', info => {
|
|
||||||
infoEmitted = true;
|
|
||||||
const parsedInfo = info;
|
|
||||||
parsedInfo.end = JSON.parse(parsedInfo.end);
|
|
||||||
assert.deepStrictEqual(parsedInfo, {
|
|
||||||
end: { ts: 42, uniqID: '2345' },
|
|
||||||
});
|
|
||||||
});
|
|
||||||
lrs.on('data', entry => {
|
|
||||||
assert.deepStrictEqual(entry, expectedStreamEntries.insert);
|
|
||||||
++nbReceivedEntries;
|
|
||||||
});
|
|
||||||
lrs.on('end', () => {
|
|
||||||
assert.strictEqual(nbReceivedEntries, 2);
|
|
||||||
assert(infoEmitted);
|
|
||||||
done();
|
|
||||||
});
|
|
||||||
logEntries.forEach(entry => lrs.write(entry));
|
|
||||||
lrs.end();
|
|
||||||
});
|
|
||||||
});
|
|
|
@ -1,538 +0,0 @@
|
||||||
const assert = require('assert');
|
|
||||||
|
|
||||||
const MongoClientInterface = require(
|
|
||||||
'../../../../../lib/storage/metadata/mongoclient/MongoClientInterface');
|
|
||||||
const DummyMongoDB = require('./utils/DummyMongoDB');
|
|
||||||
const DummyConfigObject = require('./utils/DummyConfigObject');
|
|
||||||
const DummyRequestLogger = require('./utils/DummyRequestLogger');
|
|
||||||
const { generateMD } = require('./utils/helper');
|
|
||||||
|
|
||||||
const log = new DummyRequestLogger();
|
|
||||||
const mongoTestClient = new MongoClientInterface({});
|
|
||||||
mongoTestClient.db = new DummyMongoDB();
|
|
||||||
|
|
||||||
const bucketName = 'mongoTestBucket';
|
|
||||||
const objectName = 'mongoTestObject';
|
|
||||||
|
|
||||||
const zeroRef = {
|
|
||||||
objects: 0,
|
|
||||||
versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 0, prev: 0 },
|
|
||||||
byLocation: {},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
const startRef = {
|
|
||||||
objects: 10,
|
|
||||||
versions: 10,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 1000, prev: 1000 },
|
|
||||||
byLocation: {
|
|
||||||
mongotest: { curr: 1000, prev: 1000 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
function assertSuccessResults(testParams, cb) {
|
|
||||||
const { newVal, retValues, initRef, resRef, params } = testParams;
|
|
||||||
mongoTestClient.dataCount.set(initRef);
|
|
||||||
mongoTestClient.db.setReturnValues(retValues);
|
|
||||||
assert.deepStrictEqual(mongoTestClient.dataCount.results(), initRef);
|
|
||||||
mongoTestClient.putObject(bucketName, objectName, newVal, params, log,
|
|
||||||
err => {
|
|
||||||
assert.ifError(err, `Expected success, but got error ${err}`);
|
|
||||||
assert.deepStrictEqual(
|
|
||||||
mongoTestClient.dataCount.results(), resRef);
|
|
||||||
cb();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
function assertFailureResults(testParams, cb) {
|
|
||||||
const { newVal, retValues, initRef, params } = testParams;
|
|
||||||
mongoTestClient.db.fail = true;
|
|
||||||
mongoTestClient.dataCount.set(initRef);
|
|
||||||
mongoTestClient.db.setReturnValues(retValues);
|
|
||||||
assert.deepStrictEqual(mongoTestClient.dataCount.results(), initRef);
|
|
||||||
mongoTestClient.putObject(bucketName, objectName, newVal, params, log,
|
|
||||||
err => {
|
|
||||||
assert(err, 'Expected error, but got success');
|
|
||||||
assert.deepStrictEqual(
|
|
||||||
mongoTestClient.dataCount.results(), initRef);
|
|
||||||
cb();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
describe('MongoClientInterface, init behavior', () => {
|
|
||||||
let s3ConfigObj;
|
|
||||||
const locationConstraints = {
|
|
||||||
locationOne: { isTransient: true },
|
|
||||||
locationTwo: { isTransient: false },
|
|
||||||
};
|
|
||||||
|
|
||||||
beforeEach(() => {
|
|
||||||
s3ConfigObj = new DummyConfigObject();
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should set DataCounter transientList when declaring a ' +
|
|
||||||
'new MongoClientInterface object', () => {
|
|
||||||
s3ConfigObj.setLocationConstraints(locationConstraints);
|
|
||||||
const mongoClient = new MongoClientInterface({ config: s3ConfigObj });
|
|
||||||
const expectedRes = { locationOne: true, locationTwo: false };
|
|
||||||
assert.deepStrictEqual(
|
|
||||||
mongoClient.dataCount.transientList, expectedRes);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should update DataCounter transientList if location constraints ' +
|
|
||||||
'are updated', done => {
|
|
||||||
const mongoClient = new MongoClientInterface({ config: s3ConfigObj });
|
|
||||||
assert.deepStrictEqual(mongoClient.dataCount.transientList, {});
|
|
||||||
const expectedRes = { locationOne: true, locationTwo: false };
|
|
||||||
s3ConfigObj.once('MongoClientTestDone', () => {
|
|
||||||
assert.deepStrictEqual(
|
|
||||||
mongoClient.dataCount.transientList, expectedRes);
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
s3ConfigObj.setLocationConstraints(locationConstraints);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('MongoClientInterface::dataCount', () => {
|
|
||||||
describe('MongoClientInterface::putObject', () => {
|
|
||||||
beforeEach(() => {
|
|
||||||
mongoTestClient.db.reset();
|
|
||||||
});
|
|
||||||
|
|
||||||
const failTests = [
|
|
||||||
{
|
|
||||||
it: 'should not add count when NonVer put object call fails',
|
|
||||||
params: {},
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should not add count when verCase1 put object call fails',
|
|
||||||
params: { versioning: true },
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should not add count when verCase2 put object call fails',
|
|
||||||
params: { versionId: '' },
|
|
||||||
},
|
|
||||||
{
|
|
||||||
it: 'should not add count when verCase3 put object call fails',
|
|
||||||
params: { versionId: 'vercase' },
|
|
||||||
},
|
|
||||||
];
|
|
||||||
|
|
||||||
failTests.forEach(test => it(test.it, done => {
|
|
||||||
const retValues = [];
|
|
||||||
const newVal = generateMD(objectName, 200);
|
|
||||||
const testParams = {
|
|
||||||
newVal,
|
|
||||||
retValues,
|
|
||||||
initRef: zeroRef,
|
|
||||||
params: test.params,
|
|
||||||
};
|
|
||||||
assertFailureResults(testParams, done);
|
|
||||||
}));
|
|
||||||
|
|
||||||
it('should call putObjectNonVer and add object',
|
|
||||||
done => {
|
|
||||||
const retValues = [];
|
|
||||||
const newVal = generateMD(objectName, 200, '',
|
|
||||||
[{ site: 'repsite', status: 'COMPLETED' }]);
|
|
||||||
const expectedRes = {
|
|
||||||
objects: 1,
|
|
||||||
versions: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 400, prev: 0 },
|
|
||||||
byLocation: {
|
|
||||||
mongotest: { curr: 200, prev: 0 },
|
|
||||||
repsite: { curr: 200, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
const testParams = {
|
|
||||||
newVal,
|
|
||||||
retValues,
|
|
||||||
initRef: zeroRef,
|
|
||||||
resRef: expectedRes,
|
|
||||||
params: {},
|
|
||||||
};
|
|
||||||
assertSuccessResults(testParams, done);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should call putObjectNonVer and add object, overwrite',
|
|
||||||
done => {
|
|
||||||
const retValues = [
|
|
||||||
{ _id: objectName, value: generateMD(objectName, 100) },
|
|
||||||
];
|
|
||||||
const newVal = generateMD(objectName, 200, '',
|
|
||||||
[{ site: 'repsite', status: 'COMPLETED' }]);
|
|
||||||
const expectedRes = {
|
|
||||||
objects: 10,
|
|
||||||
versions: 10,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 1300, prev: 1000 },
|
|
||||||
byLocation: {
|
|
||||||
mongotest: { curr: 1100, prev: 1000 },
|
|
||||||
repsite: { curr: 200, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
const testParams = {
|
|
||||||
newVal,
|
|
||||||
retValues,
|
|
||||||
initRef: startRef,
|
|
||||||
resRef: expectedRes,
|
|
||||||
params: {},
|
|
||||||
};
|
|
||||||
assertSuccessResults(testParams, done);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should call putObjectVerCase1 and add versioned object',
|
|
||||||
done => {
|
|
||||||
const retValues = [
|
|
||||||
{ _id: objectName, value: generateMD(objectName, 100) },
|
|
||||||
];
|
|
||||||
const newVal = generateMD(objectName, 200, '',
|
|
||||||
[{ site: 'repsite', status: 'COMPLETED' }]);
|
|
||||||
const expectedRes = {
|
|
||||||
objects: 10,
|
|
||||||
versions: 11,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 1300, prev: 1100 },
|
|
||||||
byLocation: {
|
|
||||||
mongotest: { curr: 1100, prev: 1100 },
|
|
||||||
repsite: { curr: 200, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
const testParams = {
|
|
||||||
newVal,
|
|
||||||
retValues,
|
|
||||||
initRef: startRef,
|
|
||||||
resRef: expectedRes,
|
|
||||||
params: { versioning: true },
|
|
||||||
};
|
|
||||||
assertSuccessResults(testParams, done);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should call putObjectVerCase2 and add null versioned object',
|
|
||||||
done => {
|
|
||||||
const retValues = [
|
|
||||||
{ _id: objectName, value: generateMD(objectName, 100) },
|
|
||||||
];
|
|
||||||
const newVal = generateMD(objectName, 200, '',
|
|
||||||
[{ site: 'repsite', status: 'COMPLETED' }]);
|
|
||||||
const expectedRes = {
|
|
||||||
objects: 10,
|
|
||||||
versions: 10,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 1300, prev: 1000 },
|
|
||||||
byLocation: {
|
|
||||||
mongotest: { curr: 1100, prev: 1000 },
|
|
||||||
repsite: { curr: 200, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
const testParams = {
|
|
||||||
newVal,
|
|
||||||
retValues,
|
|
||||||
initRef: startRef,
|
|
||||||
resRef: expectedRes,
|
|
||||||
params: { versionId: '' },
|
|
||||||
};
|
|
||||||
assertSuccessResults(testParams, done);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should call putObjectVerCase3 and update versioned object',
|
|
||||||
done => {
|
|
||||||
const retValues = [
|
|
||||||
[
|
|
||||||
{ _id: objectName, value: generateMD(objectName, 100, '',
|
|
||||||
[{ site: 'repsite', status: 'COMPLETED' },
|
|
||||||
{ site: 'repsite2', status: 'PENDING' }]) },
|
|
||||||
],
|
|
||||||
null,
|
|
||||||
];
|
|
||||||
const newVal = generateMD(objectName, 100, '',
|
|
||||||
[
|
|
||||||
{ site: 'repsite', status: 'COMPLETED' },
|
|
||||||
{ site: 'repsite2', status: 'COMPLETED' },
|
|
||||||
]);
|
|
||||||
const initRef = {
|
|
||||||
objects: 10,
|
|
||||||
versions: 10,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 1000, prev: 1100 },
|
|
||||||
byLocation: {
|
|
||||||
mongotest: { curr: 1000, prev: 1000 },
|
|
||||||
repsite: { curr: 0, prev: 100 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
const expectedRes = {
|
|
||||||
objects: 10,
|
|
||||||
versions: 10,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 1000, prev: 1200 },
|
|
||||||
byLocation: {
|
|
||||||
mongotest: { curr: 1000, prev: 1000 },
|
|
||||||
repsite: { curr: 0, prev: 100 },
|
|
||||||
repsite2: { curr: 0, prev: 100 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
const testParams = {
|
|
||||||
newVal,
|
|
||||||
retValues,
|
|
||||||
initRef,
|
|
||||||
resRef: expectedRes,
|
|
||||||
params: { versionId: 'versioned' },
|
|
||||||
};
|
|
||||||
assertSuccessResults(testParams, done);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should call putObjectVerCase3 and update master object', done => {
|
|
||||||
const retValues = [
|
|
||||||
[
|
|
||||||
{ _id: objectName, value: generateMD(objectName, 100, '',
|
|
||||||
[{ site: 'repsite', status: 'COMPLETED' },
|
|
||||||
{ site: 'repsite2', status: 'PENDING' }]) },
|
|
||||||
{ _id: objectName, value: generateMD(objectName, 100, '',
|
|
||||||
[{ site: 'repsite', status: 'COMPLETED' },
|
|
||||||
{ site: 'repsite2', status: 'PENDING' }]) },
|
|
||||||
],
|
|
||||||
null,
|
|
||||||
];
|
|
||||||
const newVal = generateMD(objectName, 100, '',
|
|
||||||
[
|
|
||||||
{ site: 'repsite', status: 'COMPLETED' },
|
|
||||||
{ site: 'repsite2', status: 'COMPLETED' },
|
|
||||||
]);
|
|
||||||
const initRef = {
|
|
||||||
objects: 10,
|
|
||||||
versions: 10,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 1100, prev: 1000 },
|
|
||||||
byLocation: {
|
|
||||||
mongotest: { curr: 1000, prev: 1000 },
|
|
||||||
repsite: { curr: 100, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
const expectedRes = {
|
|
||||||
objects: 10,
|
|
||||||
versions: 10,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 1200, prev: 1000 },
|
|
||||||
byLocation: {
|
|
||||||
mongotest: { curr: 1000, prev: 1000 },
|
|
||||||
repsite: { curr: 100, prev: 0 },
|
|
||||||
repsite2: { curr: 100, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
const testParams = {
|
|
||||||
newVal,
|
|
||||||
retValues,
|
|
||||||
initRef,
|
|
||||||
resRef: expectedRes,
|
|
||||||
params: { versionId: 'master' },
|
|
||||||
};
|
|
||||||
assertSuccessResults(testParams, done);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('MongoClientInterface::_handleResults', () => {
|
|
||||||
it('should return zero-result', () => {
|
|
||||||
const testInput = {
|
|
||||||
masterCount: 0, masterData: {},
|
|
||||||
nullCount: 0, nullData: {},
|
|
||||||
versionCount: 0, versionData: {},
|
|
||||||
};
|
|
||||||
const testResults = mongoTestClient._handleResults(testInput, true);
|
|
||||||
const expectedRes = {
|
|
||||||
versions: 0, objects: 0,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 0, prev: 0 },
|
|
||||||
locations: {},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
assert.deepStrictEqual(testResults, expectedRes);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return correct value if isVer is false', () => {
|
|
||||||
const testInput = {
|
|
||||||
masterCount: 2, masterData: { test1: 10, test2: 10 },
|
|
||||||
nullCount: 2, nullData: { test1: 10, test2: 10 },
|
|
||||||
versionCount: 2, versionData: { test1: 20, test2: 20 },
|
|
||||||
};
|
|
||||||
const testResults = mongoTestClient._handleResults(testInput, false);
|
|
||||||
const expectedRes = {
|
|
||||||
versions: 0, objects: 4,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 40, prev: 0 },
|
|
||||||
locations: {
|
|
||||||
test1: { curr: 20, prev: 0 },
|
|
||||||
test2: { curr: 20, prev: 0 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
assert.deepStrictEqual(testResults, expectedRes);
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return correct value if isVer is true', () => {
|
|
||||||
const testInput = {
|
|
||||||
masterCount: 2, masterData: { test1: 10, test2: 10 },
|
|
||||||
nullCount: 2, nullData: { test1: 10, test2: 10 },
|
|
||||||
versionCount: 4, versionData: { test1: 20, test2: 20 },
|
|
||||||
};
|
|
||||||
const testResults = mongoTestClient._handleResults(testInput, true);
|
|
||||||
const expectedRes = {
|
|
||||||
versions: 2, objects: 4,
|
|
||||||
dataManaged: {
|
|
||||||
total: { curr: 40, prev: 20 },
|
|
||||||
locations: {
|
|
||||||
test1: { curr: 20, prev: 10 },
|
|
||||||
test2: { curr: 20, prev: 10 },
|
|
||||||
},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
assert.deepStrictEqual(testResults, expectedRes);
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
describe('MongoClientInterface::_handleMongo', () => {
|
|
||||||
beforeEach(() => mongoTestClient.db.reset());
|
|
||||||
|
|
||||||
it('should return error if mongo aggregate fails', done => {
|
|
||||||
const retValues = [new Error('testError')];
|
|
||||||
mongoTestClient.db.setReturnValues(retValues);
|
|
||||||
const testCollection = mongoTestClient.db.collection('test');
|
|
||||||
mongoTestClient._handleMongo(testCollection, {}, false, log, err => {
|
|
||||||
assert(err, 'Expected error, but got success');
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return empty object if mongo aggregate has no results', done => {
|
|
||||||
const testCollection = mongoTestClient.db.collection('test');
|
|
||||||
mongoTestClient._handleMongo(testCollection, {}, false, log,
|
|
||||||
(err, res) => {
|
|
||||||
assert.ifError(err, `Expected success, but got error ${err}`);
|
|
||||||
assert.deepStrictEqual(res, {});
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
it('should return empty object if mongo aggregate has missing results',
|
|
||||||
done => {
|
|
||||||
const retValues = [[{
|
|
||||||
count: undefined,
|
|
||||||
data: undefined,
|
|
||||||
repData: undefined,
|
|
||||||
}]];
|
|
||||||
mongoTestClient.db.setReturnValues(retValues);
|
|
||||||
const testCollection = mongoTestClient.db.collection('test');
|
|
||||||
mongoTestClient._handleMongo(testCollection, {}, false, log,
|
|
||||||
(err, res) => {
|
|
||||||
assert.ifError(err, `Expected success, but got error ${err}`);
|
|
||||||
assert.deepStrictEqual(res, {});
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
const testRetValues = [[{
|
|
||||||
count: [{ _id: null, count: 100 }],
|
|
||||||
data: [
|
|
||||||
{ _id: 'locationone', bytes: 1000 },
|
|
||||||
{ _id: 'locationtwo', bytes: 1000 },
|
|
||||||
],
|
|
||||||
repData: [
|
|
||||||
{ _id: 'awsbackend', bytes: 500 },
|
|
||||||
{ _id: 'azurebackend', bytes: 500 },
|
|
||||||
{ _id: 'gcpbackend', bytes: 500 },
|
|
||||||
],
|
|
||||||
compData: [
|
|
||||||
{ _id: 'locationone', bytes: 500 },
|
|
||||||
{ _id: 'locationtwo', bytes: 500 },
|
|
||||||
],
|
|
||||||
}]];
|
|
||||||
|
|
||||||
it('should return correct results, transient false', done => {
|
|
||||||
mongoTestClient.db.setReturnValues(testRetValues);
|
|
||||||
const testCollection = mongoTestClient.db.collection('test');
|
|
||||||
mongoTestClient._handleMongo(testCollection, {}, false, log,
|
|
||||||
(err, res) => {
|
|
||||||
assert.ifError(err, `Expected success, but got error ${err}`);
|
|
||||||
assert.deepStrictEqual(res, {
|
|
||||||
count: 100,
|
|
||||||
data: {
|
|
||||||
locationone: 1000,
|
|
||||||
locationtwo: 1000,
|
|
||||||
awsbackend: 500,
|
|
||||||
azurebackend: 500,
|
|
||||||
gcpbackend: 500,
|
|
||||||
},
|
|
||||||
});
|
|
||||||
return done();
|
|
||||||
});
|
|
||||||
});
|
|
||||||
|
|
||||||
    it('should return correct results, transient true', done => {
        mongoTestClient.db.setReturnValues(testRetValues);
        const testCollection = mongoTestClient.db.collection('test');
        mongoTestClient._handleMongo(testCollection, {}, true, log,
            (err, res) => {
                assert.ifError(err, `Expected success, but got error ${err}`);
                assert.deepStrictEqual(res, {
                    count: 100,
                    data: {
                        locationone: 500,
                        locationtwo: 500,
                        awsbackend: 500,
                        azurebackend: 500,
                        gcpbackend: 500,
                    },
                });
                return done();
            });
    });

    const testRetValuesNeg = [[{
        count: [{ _id: null, count: 100 }],
        data: [
            { _id: 'locationone', bytes: 100 },
            { _id: 'locationtwo', bytes: 100 },
        ],
        repData: [
            { _id: 'awsbackend', bytes: 500 },
            { _id: 'azurebackend', bytes: 500 },
            { _id: 'gcpbackend', bytes: 500 },
        ],
        compData: [
            { _id: 'locationone', bytes: 500 },
            { _id: 'locationtwo', bytes: 500 },
        ],
    }]];
    it('should clamp negative values to 0', done => {
        mongoTestClient.db.setReturnValues(testRetValuesNeg);
        const testCollection = mongoTestClient.db.collection('test');
        mongoTestClient._handleMongo(testCollection, {}, true, log,
            (err, res) => {
                assert.ifError(err, `Expected success, but got error ${err}`);
                assert.deepStrictEqual(res, {
                    count: 100,
                    data: {
                        locationone: 0,
                        locationtwo: 0,
                        awsbackend: 500,
                        azurebackend: 500,
                        gcpbackend: 500,
                    },
                });
                return done();
            });
    });
});
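The fixtures above imply how _handleMongo folds the aggregation output into a { count, data } object. The following is only a sketch inferred from the expected results of these tests; the helper name and exact field handling are assumptions, not the actual MongoClientInterface code.

const assert = require('assert');

// Hypothetical reduction matching the fixtures above: `count` comes from the
// single count document, each `data` entry maps a location to its byte total,
// `repData` adds the replication backends, and when the transient flag is set
// the completed bytes (`compData`) are subtracted and clamped at 0.
function reduceAggregateResult(aggRes, isTransient) {
    const count = (aggRes.count && aggRes.count[0]) ? aggRes.count[0].count : 0;
    const data = {};
    (aggRes.data || []).forEach(({ _id, bytes }) => { data[_id] = bytes; });
    if (isTransient) {
        (aggRes.compData || []).forEach(({ _id, bytes }) => {
            data[_id] = Math.max((data[_id] || 0) - bytes, 0);
        });
    }
    (aggRes.repData || []).forEach(({ _id, bytes }) => {
        data[_id] = (data[_id] || 0) + bytes;
    });
    return { count, data };
}

// Reproduces the "transient true" expectation: locations drop to 500,
// replication backends stay at 500.
assert.deepStrictEqual(
    reduceAggregateResult({
        count: [{ _id: null, count: 100 }],
        data: [{ _id: 'locationone', bytes: 1000 }, { _id: 'locationtwo', bytes: 1000 }],
        repData: [{ _id: 'awsbackend', bytes: 500 }],
        compData: [{ _id: 'locationone', bytes: 500 }, { _id: 'locationtwo', bytes: 500 }],
    }, true),
    { count: 100, data: { locationone: 500, locationtwo: 500, awsbackend: 500 } });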
@ -1,16 +0,0 @@
const { EventEmitter } = require('events');

class DummyConfigObject extends EventEmitter {
    constructor() {
        super();
        this.locationConstraints = null;
        this.isTest = true;
    }

    setLocationConstraints(locationConstraints) {
        this.locationConstraints = locationConstraints;
        this.emit('location-constraints-update');
    }
}

module.exports = DummyConfigObject;
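A short usage sketch for the config stub above; the require path, listener and constraint values are illustrative, not taken from an existing test.

const DummyConfigObject = require('./DummyConfigObject'); // path is assumed

const config = new DummyConfigObject();
let notified = false;
// setLocationConstraints() stores the new constraints and emits
// 'location-constraints-update', which consumers can subscribe to.
config.on('location-constraints-update', () => { notified = true; });
config.setLocationConstraints({ 'us-east-1': { type: 'file' } });
// notified is now true and config.locationConstraints holds the object above.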
@ -1,103 +0,0 @@
const testError = new Error('test error');

class DummyCollection {
    constructor(name, isFail) {
        this.s = {
            name,
        };
        this.fail = isFail;
        this.retQueue = [];
    }

    setReturnValues(retArray) {
        this.retQueue.push(...retArray);
    }

    aggregate() {
        return {
            toArray: cb => {
                if (this.retQueue.length <= 0) {
                    return cb(null, []);
                }
                const retVal = this.retQueue[0];
                this.retQueue = this.retQueue.slice(1);
                if (retVal instanceof Error) {
                    return cb(retVal);
                }
                return cb(null, retVal);
            },
        };
    }

    bulkWrite(cmds, opt, cb) {
        process.stdout.write('mock mongodb.bulkWrite call\n');
        if (this.fail) {
            return cb(testError);
        }
        return cb();
    }

    update(target, doc, opt, cb) {
        process.stdout.write('mock mongodb.update call\n');
        if (this.fail) {
            return cb(testError);
        }
        return cb();
    }

    find() {
        return {
            toArray: cb => {
                if (this.retQueue.length <= 0) {
                    return cb(null, []);
                }
                const retVal = this.retQueue[0];
                this.retQueue = this.retQueue.slice(1);
                if (retVal instanceof Error) {
                    return cb(retVal);
                }
                return cb(null, retVal);
            },
        };
    }

    findOne(query, opt, cb) {
        if (typeof opt === 'function' && cb === undefined) {
            // eslint-disable-next-line no-param-reassign
            cb = opt;
        }
        if (this.retQueue.length <= 0) {
            return cb(null);
        }
        const retVal = this.retQueue[0];
        this.retQueue = this.retQueue.slice(1);
        if (retVal instanceof Error) {
            return cb(retVal);
        }
        return cb(null, retVal);
    }
}

class DummyMongoDB {
    constructor() {
        this.fail = false;
        this.returnQueue = [];
    }

    reset() {
        this.fail = false;
        this.returnQueue = [];
    }

    setReturnValues(retValues) {
        this.returnQueue.push(...retValues);
    }

    collection(name) {
        const c = new DummyCollection(name, this.fail);
        c.setReturnValues(this.returnQueue);
        return c;
    }
}

module.exports = DummyMongoDB;
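How the in-memory mock above is driven (a sketch; only the require path is assumed): values queued with setReturnValues() are copied into collections created afterwards, and each aggregate()/find() call pops one entry off the queue, returning it as the callback error when it is an Error instance.

const assert = require('assert');
const DummyMongoDB = require('./DummyMongoDB'); // path is assumed

const db = new DummyMongoDB();
db.setReturnValues([[{ _id: 'locationone', bytes: 42 }]]);
const collection = db.collection('test');

// The queued array is returned by the first aggregate().toArray() call...
collection.aggregate().toArray((err, res) => {
    assert.ifError(err);
    assert.deepStrictEqual(res, [{ _id: 'locationone', bytes: 42 }]);
});
// ...and once the queue is drained, subsequent calls return an empty array.
collection.aggregate().toArray((err, res) => {
    assert.ifError(err);
    assert.deepStrictEqual(res, []);
});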
@ -1,58 +0,0 @@
class DummyRequestLogger {
    constructor() {
        this.ops = [];
        this.counts = {
            trace: 0,
            debug: 0,
            info: 0,
            warn: 0,
            error: 0,
            fatal: 0,
        };
        this.defaultFields = {};
    }

    trace(msg) {
        this.ops.push(['trace', [msg]]);
        this.counts.trace += 1;
    }

    debug(msg) {
        this.ops.push(['debug', [msg]]);
        this.counts.debug += 1;
    }

    info(msg) {
        this.ops.push(['info', [msg]]);
        this.counts.info += 1;
    }

    warn(msg) {
        this.ops.push(['warn', [msg]]);
        this.counts.warn += 1;
    }

    error(msg) {
        this.ops.push(['error', [msg]]);
        this.counts.error += 1;
    }

    fatal(msg) {
        this.ops.push(['fatal', [msg]]);
        this.counts.fatal += 1;
    }

    getSerializedUids() { // eslint-disable-line class-methods-use-this
        return 'dummy:Serialized:Uids';
    }

    addDefaultFields(fields) {
        Object.assign(this.defaultFields, fields);
    }

    end() {
        return this;
    }
}

module.exports = DummyRequestLogger;
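The logger stub simply records every call, so tests can assert on the per-level counters or on the recorded operations. A small illustrative check, not taken from an existing test:

const assert = require('assert');
const DummyRequestLogger = require('./DummyRequestLogger'); // path is assumed

const log = new DummyRequestLogger();
log.error('something went wrong');
log.info('still going');

// Each call bumps its level counter and is appended to the ops list.
assert.strictEqual(log.counts.error, 1);
assert.strictEqual(log.counts.info, 1);
assert.deepStrictEqual(log.ops[0], ['error', ['something went wrong']]);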
@ -1,24 +0,0 @@
const basicMD = {
    'content-length': 0,
    'key': '',
    'versionId': '',
    'replicationInfo': {
        backends: [], // site, status
    },
    'dataStoreName': 'mongotest',
};

function generateMD(objKey, size, versionId, repBackends) {
    const retMD = JSON.parse(JSON.stringify(basicMD));
    retMD.key = objKey;
    retMD['content-length'] = size;
    retMD.versionId = versionId;
    if (repBackends && Array.isArray(repBackends)) {
        retMD.replicationInfo.backends.push(...repBackends);
    }
    return retMD;
}

module.exports = {
    generateMD,
};
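generateMD() deep-copies basicMD and overrides the key, size and version; replication backends (objects with site/status fields, per the comment above) are appended when provided. An illustrative call with made-up values:

const { generateMD } = require('./utils'); // path is assumed

const md = generateMD('mykey', 1024, '98451d56bffdcf6DK8u9', [
    { site: 'awsbackend', status: 'COMPLETED' },
]);
// md['content-length'] === 1024, md.key === 'mykey',
// md.replicationInfo.backends contains the entry above, and
// md.dataStoreName stays 'mongotest' from basicMD.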
@ -27,7 +27,9 @@ describe('test generating versionIds', () => {
    });

    it('should return error decoding non-hex string versionIds', () => {
-       assert(VID.decode('foo') instanceof Error);
+       const encoded = vids.map(vid => VID.encode(vid));
+       const decoded = encoded.map(vid => VID.decode(`${vid}foo`));
+       decoded.forEach(result => assert(result instanceof Error));
    });

    it('should encode and decode versionIds', () => {
@ -1,185 +0,0 @@
'use strict'; // eslint-disable-line strict


class DummyProxyResponse {
    /**
     * Create a new instance of this dummy class
     *
     * This dummy class implements the minimum feature set
     * of the class http.OutgoingMessage suitable for the
     * arsenal.storage.metadata.proxy.BucketdRoutes test
     * without using an actual http server.
     *
     * @param {function} doneCB - function called once the response is
     *                            ready to be consumed. (err, response, body)
     */
    constructor(doneCB) {
        this.headers = {};
        this.body = null;
        this.endCalled = false;
        this.responseHead = null;
        this.doneCB = doneCB;
    }
    writeHead(statusCode, statusMessage, header) {
        this.responseHead = {
            statusCode,
            statusMessage,
            header,
        };
    }
    write(data) {
        this.body = data;
    }
    end(cb) {
        if (this.endCalled) {
            return;
        }
        this.endCalled = true;
        process.nextTick(() => {
            cb(null);
            this.doneCB(null, this, JSON.parse(this.body));
        });
    }
}

class DummyProxyRequest {
    /**
     * Create a new instance of this dummy class
     *
     * This dummy class implements the minimum feature set
     * of the class http.IncomingMessage suitable for the
     * arsenal.storage.metadata.proxy.BucketdRoutes test
     * without using an actual http server.
     *
     * @param {object} params - parameter set describing the intended request
     * @param {string} params.method - http method to fake
     * @param {string} params.url - url to fake
     * @param {string} params.body - request body to fake
     * @param {boolean} params.json - if set, assume the body to be a JSON
     *                                value to be serialized
     * @param {object} params.headers - request headers to fake
     */
    constructor(params) {
        this.method = params.method;
        this.url = params.url;
        this.json = params.json;
        this.body = Buffer.from(
            this.json ? JSON.stringify(params.body) : (params.body || ''));
        this.headers = params.headers;
        this.socket = {
            remoteAddress: '127.0.0.1',
            remotePort: 32769,
        };
        this.dataConsumed = false;
        this.endCB = null;
    }

    /**
     * bind a callback to a particular event on the request processing
     *
     * @param {string} event - one of 'data', 'end' or 'error'
     * @param {function} callback - a function suitable for the associated event
     * @returns {object} this
     */
    on(event, callback) {
        switch (event) {
        case 'data':
            process.nextTick(() => {
                callback(this.body);
                this.dataConsumed = true;
                if (this.endCB) {
                    this.endCB();
                }
            });
            break;
        case 'end':
            if (!this.dataConsumed) {
                this.endCB = callback;
            } else {
                process.nextTick(() => {
                    callback();
                });
            }
            break;
        case 'error':
            // never happens with this mock class
            break;
        default:
            process.nextTick(() => callback(new Error(
                `Unsupported DummyProxyRequest.on event '${event}'`)));
        }
        return this;
    }
}

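The request stub only supports the 'data'/'end'/'error' events that the routes need: the whole body is delivered as a single Buffer chunk on the next tick, and 'end' fires right after it. A sketch of how a handler would consume it; the handler code and path are illustrative:

const req = new DummyProxyRequest({
    method: 'POST',
    url: '/default/bucket/foo', // illustrative path
    json: true,
    body: { key: 'obj1' },
    headers: {},
});

const chunks = [];
req.on('data', chunk => chunks.push(chunk))
    .on('end', () => {
        // The whole JSON-serialized body arrives as one chunk.
        const body = JSON.parse(Buffer.concat(chunks).toString());
        // body.key === 'obj1'
    });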
class RequestDispatcher {
    /**
     * Construct a new RequestDispatcher object.
     *
     * This class connects the provided Routes class to a dummy interface
     * that enables tests to perform requests without using an actual http
     * server.
     *
     * @param {object} routes - an instance of a Routes dispatcher class
     */
    constructor(routes) {
        this.routes = routes;
    }

    /**
     * fake a POST request on the associated Routes dispatcher
     *
     * @param {string} path - the path of the object to be posted
     * @param {object} objectMD - the metadata to post for this object
     * @param {function} callback - called once the request has been processed
     *                              with these parameters (err)
     * @returns {undefined}
     */
    post(path, objectMD, callback) {
        this.routes.dispatch(new DummyProxyRequest({
            method: 'POST',
            url: path,
            json: true,
            body: objectMD,
            headers: {},
        }), new DummyProxyResponse(callback));
    }

    /**
     * fake a GET request on the associated Routes dispatcher
     *
     * @param {string} path - the path of the object to be retrieved
     * @param {function} callback - called once the request has been processed
     *                              with these parameters (err, response, body)
     * @returns {undefined}
     */
    get(path, callback) {
        this.routes.dispatch(new DummyProxyRequest({
            method: 'GET',
            url: path,
            json: true,
            body: '',
            headers: {},
        }), new DummyProxyResponse(callback));
    }

    /**
     * fake a DELETE request on the associated Routes dispatcher
     *
     * @param {string} path - the path of the object to be deleted
     * @param {function} callback - called once the request has been processed
     *                              with these parameters (err)
     * @returns {undefined}
     */
    delete(path, callback) {
        this.routes.dispatch(new DummyProxyRequest({
            method: 'DELETE',
            url: path,
            json: true,
            body: '',
            headers: {},
        }), new DummyProxyResponse(callback));
    }
}

module.exports = { RequestDispatcher };
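Putting the pieces together, a test can exercise a Routes dispatcher without an HTTP server. This is only a sketch: the require path, the stand-in routes object and the path layout are assumptions (the real tests would pass a BucketdRoutes instance from arsenal.storage.metadata.proxy).

const { RequestDispatcher } = require('./RequestDispatcher'); // path is assumed

// Minimal stand-in for a Routes object exposing dispatch(req, res).
const routes = {
    dispatch: (req, res) => {
        // Echo a trivial JSON body back through the dummy response.
        res.writeHead(200, 'OK', {});
        res.write(JSON.stringify({ url: req.url, method: req.method }));
        res.end(() => {});
    },
};

const dispatcher = new RequestDispatcher(routes);
dispatcher.get('/default/bucket/foo/obj1', (err, response, body) => {
    // response.responseHead holds what writeHead() recorded and body is the
    // JSON-parsed payload written by the handler:
    // body.url === '/default/bucket/foo/obj1', response.responseHead.statusCode === 200
});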