Compare commits


No commits in common. "8d9507a5c88507dbcfaa95193d2927a2828e87dd" and "59803d7b67c6547565fb1605af805bc37f909844" have entirely different histories.

76 changed files with 383 additions and 11853 deletions

View File

@ -387,10 +387,6 @@
"code": 409, "code": 409,
"description": "The request was rejected because it attempted to create a resource that already exists." "description": "The request was rejected because it attempted to create a resource that already exists."
}, },
"KeyAlreadyExists": {
"code": 409,
"description": "The request was rejected because it attempted to create a resource that already exists."
},
"ServiceFailure": { "ServiceFailure": {
"code": 500, "code": 500,
"description": "Server error: the request processing has failed because of an unknown error, exception or failure." "description": "Server error: the request processing has failed because of an unknown error, exception or failure."
@ -467,10 +463,6 @@
"code": 400, "code": 400,
"description": "The request was rejected because an invalid or out-of-range value was supplied for an input parameter." "description": "The request was rejected because an invalid or out-of-range value was supplied for an input parameter."
}, },
"MalformedPolicy": {
"code": 400,
"description": "Policies must be valid JSON and the first byte must be '{'"
},
"_comment": "-------------- Special non-AWS S3 errors --------------", "_comment": "-------------- Special non-AWS S3 errors --------------",
"MPUinProgress": { "MPUinProgress": {
"code": 409, "code": 409,

View File

@ -3,7 +3,6 @@ module.exports = {
constants: require('./lib/constants'), constants: require('./lib/constants'),
db: require('./lib/db'), db: require('./lib/db'),
errors: require('./lib/errors.js'), errors: require('./lib/errors.js'),
errorUtils: require('./lib/errorUtils'),
shuffle: require('./lib/shuffle'), shuffle: require('./lib/shuffle'),
stringHash: require('./lib/stringHash'), stringHash: require('./lib/stringHash'),
ipCheck: require('./lib/ipCheck'), ipCheck: require('./lib/ipCheck'),
@ -13,7 +12,15 @@ module.exports = {
dhparam: require('./lib/https/dh2048.js'), dhparam: require('./lib/https/dh2048.js'),
}, },
algorithms: { algorithms: {
list: require('./lib/algos/list/exportAlgos'), list: {
Basic: require('./lib/algos/list/basic').List,
Delimiter: require('./lib/algos/list/delimiter').Delimiter,
DelimiterVersions: require('./lib/algos/list/delimiterVersions')
.DelimiterVersions,
DelimiterMaster: require('./lib/algos/list/delimiterMaster')
.DelimiterMaster,
MPU: require('./lib/algos/list/MPU').MultipartUploads,
},
listTools: { listTools: {
DelimiterTools: require('./lib/algos/list/tools'), DelimiterTools: require('./lib/algos/list/tools'),
}, },
@ -46,10 +53,6 @@ module.exports = {
RESTClient: require('./lib/network/rest/RESTClient'), RESTClient: require('./lib/network/rest/RESTClient'),
}, },
RoundRobin: require('./lib/network/RoundRobin'), RoundRobin: require('./lib/network/RoundRobin'),
probe: {
HealthProbeServer:
require('./lib/network/probe/HealthProbeServer.js'),
},
kmip: require('./lib/network/kmip'), kmip: require('./lib/network/kmip'),
kmipClient: require('./lib/network/kmip/Client'), kmipClient: require('./lib/network/kmip/Client'),
}, },
@ -62,9 +65,6 @@ module.exports = {
convertToXml: require('./lib/s3middleware/convertToXml'), convertToXml: require('./lib/s3middleware/convertToXml'),
escapeForXml: require('./lib/s3middleware/escapeForXml'), escapeForXml: require('./lib/s3middleware/escapeForXml'),
tagging: require('./lib/s3middleware/tagging'), tagging: require('./lib/s3middleware/tagging'),
checkDateModifiedHeaders:
require('./lib/s3middleware/validateConditionalHeaders')
.checkDateModifiedHeaders,
validateConditionalHeaders: validateConditionalHeaders:
require('./lib/s3middleware/validateConditionalHeaders') require('./lib/s3middleware/validateConditionalHeaders')
.validateConditionalHeaders, .validateConditionalHeaders,
@ -82,39 +82,12 @@ module.exports = {
}, },
storage: { storage: {
metadata: { metadata: {
MetadataWrapper: require('./lib/storage/metadata/MetadataWrapper'),
bucketclient: {
BucketClientInterface:
require('./lib/storage/metadata/bucketclient/' +
'BucketClientInterface'),
LogConsumer:
require('./lib/storage/metadata/bucketclient/LogConsumer'),
},
file: {
BucketFileInterface:
require('./lib/storage/metadata/file/BucketFileInterface'),
MetadataFileServer: MetadataFileServer:
require('./lib/storage/metadata/file/MetadataFileServer'), require('./lib/storage/metadata/file/MetadataFileServer'),
MetadataFileClient: MetadataFileClient:
require('./lib/storage/metadata/file/MetadataFileClient'), require('./lib/storage/metadata/file/MetadataFileClient'),
},
inMemory: {
metastore:
require('./lib/storage/metadata/in_memory/metastore'),
metadata: require('./lib/storage/metadata/in_memory/metadata'),
bucketUtilities:
require('./lib/storage/metadata/in_memory/bucket_utilities'),
},
mongoclient: {
MongoClientInterface:
require('./lib/storage/metadata/mongoclient/' +
'MongoClientInterface'),
LogConsumer: LogConsumer:
require('./lib/storage/metadata/mongoclient/LogConsumer'), require('./lib/storage/metadata/bucketclient/LogConsumer'),
},
proxy: {
Server: require('./lib/storage/metadata/proxy/Server'),
},
}, },
data: { data: {
file: { file: {
@ -143,8 +116,4 @@ module.exports = {
pensieve: { pensieve: {
credentialUtils: require('./lib/executables/pensieveCreds/utils'), credentialUtils: require('./lib/executables/pensieveCreds/utils'),
}, },
backbeat: {
Metrics: require('./lib/backbeat/Metrics'),
routes: require('./lib/backbeat/routes'),
},
}; };
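With this change the listing algorithm classes are attached inline under `algorithms.list` instead of being re-exported from lib/algos/list/exportAlgos (removed below). A minimal consumer sketch, assuming the package is required as `arsenal` (the package name is not shown in this diff):

    // Hypothetical consumer of the exports above.
    const { algorithms } = require('arsenal');
    const { Delimiter, DelimiterVersions, DelimiterMaster, MPU } = algorithms.list;
    // The same classes were previously reachable through the now-removed
    // lib/algos/list/exportAlgos module.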

View File

@ -1,9 +0,0 @@
module.exports = {
Basic: require('./basic').List,
Delimiter: require('./delimiter').Delimiter,
DelimiterVersions: require('./delimiterVersions')
.DelimiterVersions,
DelimiterMaster: require('./delimiterMaster')
.DelimiterMaster,
MPU: require('./MPU').MultipartUploads,
};

View File

@ -1,88 +0,0 @@
const assert = require('assert');
const { FILTER_END, FILTER_SKIP, SKIP_NONE } = require('./tools');
const MAX_STREAK_LENGTH = 100;
/**
* Handle the filtering and the skip mechanism of a listing result.
*/
class Skip {
/**
* @param {Object} params - skip parameters
* @param {Object} params.extension - delimiter extension used (required)
* @param {String} params.gte - current range gte (greater than or
* equal) used by the client code
*/
constructor(params) {
assert(params.extension);
this.extension = params.extension;
this.gteParams = params.gte;
this.listingEndCb = null;
this.skipRangeCb = null;
/* Used to count consecutive FILTER_SKIP returned by the extension
* filter method. Once this counter reaches MAX_STREAK_LENGTH, the
* filter function tries to skip unwanted values by defining a new
* range. */
this.streakLength = 0;
}
setListingEndCb(cb) {
this.listingEndCb = cb;
}
setSkipRangeCb(cb) {
this.skipRangeCb = cb;
}
/**
* Filter an entry.
* @param {Object} entry - entry to filter.
* @return {undefined}
*
* This function calls the listing end or the skip range callbacks if
* needed.
*/
filter(entry) {
assert(this.listingEndCb);
assert(this.skipRangeCb);
const filteringResult = this.extension.filter(entry);
const skippingRange = this.extension.skipping();
if (filteringResult === FILTER_END) {
this.listingEndCb();
} else if (filteringResult === FILTER_SKIP
&& skippingRange !== SKIP_NONE) {
if (++this.streakLength >= MAX_STREAK_LENGTH) {
const newRange = this._inc(skippingRange);
/* Avoid looping over the same range again and again. */
if (newRange === this.gteParams) {
this.streakLength = 1;
} else {
this.skipRangeCb(newRange);
}
}
} else {
this.streakLength = 0;
}
}
_inc(str) {
if (!str) {
return str;
}
const lastCharValue = str.charCodeAt(str.length - 1);
const lastCharNewValue = String.fromCharCode(lastCharValue + 1);
return `${str.slice(0, str.length - 1)}${lastCharNewValue}`;
}
}
module.exports = Skip;
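The streak heuristic above works as follows: once the extension has returned FILTER_SKIP for MAX_STREAK_LENGTH entries in a row, Skip asks the client (via skipRangeCb) to restart the listing at a new gte range computed by _inc, which bumps the last character of the current skipping prefix. A small illustration of _inc with an example value:

    // '/' (0x2f) becomes '0' (0x30), so a listing restarted at 'foo0'
    // jumps over every remaining key under the 'foo/' prefix.
    const inc = str => (str
        ? `${str.slice(0, -1)}${String.fromCharCode(str.charCodeAt(str.length - 1) + 1)}`
        : str);
    inc('foo/'); // => 'foo0'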

View File

@ -127,17 +127,6 @@ function check(request, log, data, awsService) {
return { err: errors.RequestTimeTooSkewed }; return { err: errors.RequestTimeTooSkewed };
} }
let proxyPath = null;
if (request.headers.proxy_path) {
try {
proxyPath = decodeURIComponent(request.headers.proxy_path);
} catch (err) {
log.debug('invalid proxy_path header', { proxyPath, err });
return { err: errors.InvalidArgument.customizeDescription(
'invalid proxy_path header') };
}
}
const stringToSign = constructStringToSign({ const stringToSign = constructStringToSign({
log, log,
request, request,
@ -147,7 +136,6 @@ function check(request, log, data, awsService) {
timestamp, timestamp,
payloadChecksum, payloadChecksum,
awsService: service, awsService: service,
proxyPath,
}); });
log.trace('constructed stringToSign', { stringToSign }); log.trace('constructed stringToSign', { stringToSign });
if (stringToSign instanceof Error) { if (stringToSign instanceof Error) {
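The block removed above decoded the proxy_path header before building the string to sign; decodeURIComponent throws a URIError on malformed percent-encoding, which is why the decode was wrapped in try/catch and mapped to InvalidArgument. A standalone illustration:

    decodeURIComponent('%2Fbucket%2Fkey'); // => '/bucket/key'
    try {
        decodeURIComponent('%E0%A4%A');    // malformed escape sequence
    } catch (err) {
        // err is a URIError; the removed check translated this case
        // into errors.InvalidArgument with a customized description.
    }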

View File

@ -62,17 +62,6 @@ function check(request, log, data) {
return { err: errors.RequestTimeTooSkewed }; return { err: errors.RequestTimeTooSkewed };
} }
let proxyPath = null;
if (request.headers.proxy_path) {
try {
proxyPath = decodeURIComponent(request.headers.proxy_path);
} catch (err) {
log.debug('invalid proxy_path header', { proxyPath });
return { err: errors.InvalidArgument.customizeDescription(
'invalid proxy_path header') };
}
}
// In query v4 auth, the canonical request needs // In query v4 auth, the canonical request needs
// to include the query params OTHER THAN // to include the query params OTHER THAN
// the signature so create a // the signature so create a
@ -98,7 +87,6 @@ function check(request, log, data) {
credentialScope: credentialScope:
`${scopeDate}/${region}/${service}/${requestType}`, `${scopeDate}/${region}/${service}/${requestType}`,
awsService: service, awsService: service,
proxyPath,
}); });
if (stringToSign instanceof Error) { if (stringToSign instanceof Error) {
return { err: stringToSign }; return { err: stringToSign };

View File

@ -1,539 +0,0 @@
const async = require('async');
const errors = require('../../lib/errors');
const RedisClient = require('../../lib/metrics/RedisClient');
const StatsModel = require('../../lib/metrics/StatsModel');
const INTERVAL = 300; // 5 minutes
const EXPIRY = 86400; // 24 hours
const THROUGHPUT_EXPIRY = 900; // 15 minutes
const isTest = process.env.CI === 'true';
class Metrics {
constructor(config, logger) {
const { redisConfig, validSites, internalStart } = config;
this._logger = logger;
this._redisClient = new RedisClient(redisConfig, this._logger);
// Redis expiry is increased by an additional interval so we can reference
// the immediately preceding interval's data when computing average throughput
this._statsClient = new StatsModel(this._redisClient, INTERVAL, EXPIRY);
this._validSites = validSites;
this._internalStart = internalStart;
}
/**
* Query StatsClient for all ops given
* @param {array} ops - array of redis key names to query
* @param {string} site - site name or '*' wildcard
* @param {string} bucketName - the name of the bucket
* @param {string} objectKey - the object key name
* @param {string} versionId - the object version ID
* @param {function} cb - callback(err, res)
* @return {undefined}
*/
_queryStats(ops, site, bucketName, objectKey, versionId, cb) {
return async.map(ops, (op, done) => {
const hasGlobalKey = this._hasGlobalKey(op);
if (site === 'all') {
const queryStrings = this._validSites.map(s => {
if (bucketName && objectKey && versionId) {
return `${s}:${bucketName}:${objectKey}:` +
`${versionId}:${op}`;
}
return `${s}:${op}`;
});
if (hasGlobalKey) {
return this._statsClient.getAllGlobalStats(queryStrings,
this._logger, done);
}
return this._statsClient.getAllStats(this._logger, queryStrings,
done);
}
// Query only a single given site or storage class
// First, validate the site or storage class
if (!this._validSites.includes(site)) {
// escalate error to log later
return done({
message: 'invalid site name provided',
type: errors.RouteNotFound,
method: 'Metrics._queryStats',
});
}
let queryString;
if (bucketName && objectKey && versionId) {
queryString =
`${site}:${bucketName}:${objectKey}:${versionId}:${op}`;
} else {
queryString = `${site}:${op}`;
}
if (hasGlobalKey) {
return this._redisClient.get(queryString, (err, res) => {
if (err) {
return done({
message: `Redis error: ${err.message}`,
type: errors.InternalError,
method: 'Metrics._queryStats',
});
}
return done(null, { requests: [res || 0] });
});
}
return this._statsClient.getStats(this._logger, queryString, done);
}, cb);
}
/**
* Determines whether the Redis op uses a global counter or interval key.
* @param {String} op - The Redis operation
* @return {Boolean} true if a global counter, false otherwise
*/
_hasGlobalKey(op) {
if (isTest) {
return op.includes('test:bb:bytespending') ||
op.includes('test:bb:opspending');
}
return op.includes('bb:crr:bytespending') ||
op.includes('bb:crr:opspending');
}
/**
* Get data points which are the keys used to query Redis
* @param {object} details - route details from lib/backbeat/routes.js
* @param {array} data - provides already fetched data in order of
* dataPoints mentioned for each route in lib/backbeat/routes.js. This can
* be undefined.
* @param {function} cb - callback(error, data), where data returns
* data stored in Redis.
* @return {array} dataPoints array defined in lib/backbeat/routes.js
*/
_getData(details, data, cb) {
if (!data) {
const { dataPoints, site, bucketName, objectKey,
versionId } = details;
return this._queryStats(dataPoints, site, bucketName, objectKey,
versionId, cb);
}
return cb(null, data);
}
/**
* Uptime of server based on this._internalStart up to max of expiry
* @param {number} expiry - max expiry
* @return {number} uptime of server up to expiry time
*/
_getMaxUptime(expiry) {
let secondsSinceStart = (Date.now() - this._internalStart) / 1000;
// allow only a minimum value of 1 for uptime
if (secondsSinceStart < 1) {
secondsSinceStart = 1;
}
return secondsSinceStart < expiry ? secondsSinceStart : expiry;
}
/**
* Get replication backlog in ops count and size in bytes
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getBacklog(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: backlog', {
origin: err.method,
method: 'Metrics.getBacklog',
});
return cb(err.type.customizeDescription(err.message));
}
if (err || res.length !== details.dataPoints.length) {
this._logger.error('error getting metrics: backlog', {
method: 'Metrics.getBacklog',
});
return cb(errors.InternalError);
}
const count = Number.parseInt(res[0].requests, 10);
const size = Number.parseInt(res[1].requests, 10);
const response = {
backlog: {
description: 'Number of incomplete replication ' +
'operations (count) and number of incomplete bytes ' +
'transferred (size)',
results: {
count: count < 0 ? 0 : count,
size: size < 0 ? 0 : size,
},
},
};
return cb(null, response);
});
}
/**
* Get completed replicated stats by ops count and size in bytes
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getCompletions(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: completions', {
origin: err.method,
method: 'Metrics.getCompletions',
});
return cb(err.type.customizeDescription(err.message));
}
if (err || res.length !== details.dataPoints.length) {
this._logger.error('error getting metrics: completions', {
method: 'Metrics.getCompletions',
});
return cb(errors.InternalError);
}
const uptime = this._getMaxUptime(EXPIRY);
const numOfIntervals = Math.ceil(uptime / INTERVAL);
const [opsDone, bytesDone] = res.map(r => (
r.requests.slice(0, numOfIntervals).reduce((acc, i) =>
acc + i, 0)
));
const response = {
completions: {
description: 'Number of completed replication operations ' +
'(count) and number of bytes transferred (size) in ' +
`the last ${Math.floor(uptime)} seconds`,
results: {
count: opsDone,
size: bytesDone,
},
},
};
return cb(null, response);
});
}
/**
* Get failed replication stats by ops count and size in bytes
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getFailedMetrics(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: failures', {
origin: err.method,
method: 'Metrics.getFailedMetrics',
});
return cb(err.type.customizeDescription(err.message));
}
if (err || res.length !== details.dataPoints.length) {
this._logger.error('error getting metrics: failures', {
method: 'Metrics.getFailedMetrics',
});
return cb(errors.InternalError);
}
const uptime = this._getMaxUptime(EXPIRY);
const numOfIntervals = Math.ceil(uptime / INTERVAL);
const [opsFail, bytesFail] = res.map(r => (
r.requests.slice(0, numOfIntervals).reduce((acc, i) =>
acc + i, 0)
));
const response = {
failures: {
description: 'Number of failed replication operations ' +
'(count) and bytes (size) in the last ' +
`${Math.floor(uptime)} seconds`,
results: {
count: opsFail,
size: bytesFail,
},
},
};
return cb(null, response);
});
}
/**
* Get current throughput in ops/sec and bytes/sec up to max of 15 minutes
* Throughput is the number of units processed in a given time
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getThroughput(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: throughput', {
origin: err.method,
method: 'Metrics.getThroughput',
});
return cb(err.type.customizeDescription(err.message));
}
if (err) {
this._logger.error('error getting metrics: throughput', {
method: 'Metrics.getThroughput',
});
return cb(errors.InternalError);
}
const now = new Date();
const uptime = this._getMaxUptime(THROUGHPUT_EXPIRY);
const numOfIntervals = Math.ceil(uptime / INTERVAL);
const [opsThroughput, bytesThroughput] = res.map(r => {
let total = r.requests.slice(0, numOfIntervals).reduce(
(acc, i) => acc + i, 0);
// if uptime !== THROUGHPUT_EXPIRY, use internal timer and
// do not include the extra 4th interval
if (uptime === THROUGHPUT_EXPIRY) {
// all intervals apply, including 4th interval
const lastInterval =
this._statsClient._normalizeTimestamp(now);
// in seconds
const diff = (now - lastInterval) / 1000;
// Get average for last interval depending on time
// surpassed so far for newest interval
total += ((INTERVAL - diff) / INTERVAL) *
r.requests[numOfIntervals];
}
// Divide total by uptime to determine data per second
return (total / uptime);
});
const response = {
throughput: {
description: 'Current throughput for replication ' +
'operations in ops/sec (count) and bytes/sec (size) ' +
`in the last ${Math.floor(uptime)} seconds`,
results: {
count: opsThroughput.toFixed(2),
size: bytesThroughput.toFixed(2),
},
},
};
return cb(null, response);
});
}
/**
* Get current throughput for an object in bytes/sec. Throughput is the
* number of bytes transferred in a given time.
* @param {object} details - route details from lib/api/routes.js
* @param {function} cb - callback(error, data)
* @return {undefined}
*/
getObjectThroughput(details, cb) {
this._getData(details, undefined, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: object throughput', {
origin: err.method,
method: 'Metrics.getObjectThroughput',
});
return cb(err.type.customizeDescription(err.message));
}
if (err) {
this._logger.error('error getting metrics: object throughput', {
method: 'Metrics.getObjectThroughput',
error: err.message,
});
return cb(errors.InternalError);
}
const now = new Date();
const uptime = this._getMaxUptime(THROUGHPUT_EXPIRY);
const numOfIntervals = Math.ceil(uptime / INTERVAL);
const { requests } = res[0]; // Bytes done
let total = requests.slice(0, numOfIntervals)
.reduce((acc, i) => acc + i, 0);
// if uptime !== THROUGHPUT_EXPIRY, use internal timer
// and do not include the extra 4th interval
if (uptime === THROUGHPUT_EXPIRY) {
// all intervals apply, including 4th interval
const lastInterval =
this._statsClient._normalizeTimestamp(now);
// in seconds
const diff = (now - lastInterval) / 1000;
// Get average for last interval depending on time passed so
// far for newest interval
total += ((INTERVAL - diff) / INTERVAL) *
requests[numOfIntervals];
}
// Divide total by uptime to determine data per second
const response = {
description: 'Current throughput for object replication in ' +
'bytes/sec (throughput)',
throughput: (total / uptime).toFixed(2),
};
return cb(null, response);
});
}
/**
* Get CRR progress for an object in bytes. Progress is the percentage of
* the object that has completed replication.
* @param {object} details - route details from lib/api/routes.js
* @param {function} cb - callback(error, data)
* @return {undefined}
*/
getObjectProgress(details, cb) {
this._getData(details, undefined, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: object progress', {
origin: err.method,
method: 'Metrics.getObjectProgress',
});
return cb(err.type.customizeDescription(err.message));
}
if (err || res.length !== details.dataPoints.length) {
this._logger.error('error getting metrics: object progress', {
method: 'Metrics.getObjectProgress',
error: err.message,
});
return cb(errors.InternalError);
}
// Find if time since start is less than EXPIRY time
const uptime = this._getMaxUptime(EXPIRY);
const numOfIntervals = Math.ceil(uptime / INTERVAL);
const [totalBytesToComplete, bytesComplete] = res.map(r => (
r.requests.slice(0, numOfIntervals).reduce((acc, i) =>
acc + i, 0)
));
const ratio = totalBytesToComplete === 0 ? 0 :
bytesComplete / totalBytesToComplete;
const percentage = (ratio * 100).toFixed();
const response = {
description: 'Number of bytes to be replicated ' +
'(pending), number of bytes transferred to the ' +
'destination (completed), and percentage of the ' +
'object that has completed replication (progress)',
pending: totalBytesToComplete - bytesComplete,
completed: bytesComplete,
progress: `${percentage}%`,
};
return cb(null, response);
});
}
/**
* Get pending replication stats by ops count and size in bytes
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getPending(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: pending', {
origin: err.method,
method: 'Metrics.getPending',
});
return cb(err.type.customizeDescription(err.message));
}
const { dataPoints } = details;
if (err || res.length !== dataPoints.length) {
this._logger.error('error getting metrics: pending', {
method: 'Metrics.getPending',
error: err,
dataPoints,
res,
});
return cb(errors.InternalError
.customizeDescription('error getting metrics: pending'));
}
const count = Number.parseInt(res[0].requests, 10);
const size = Number.parseInt(res[1].requests, 10);
const response = {
pending: {
description: 'Number of pending replication ' +
'operations (count) and bytes (size)',
results: {
count: count < 0 ? 0 : count,
size: size < 0 ? 0 : size,
},
},
};
return cb(null, response);
});
}
/**
* Get all metrics
* @param {object} details - route details from lib/backbeat/routes.js
* @param {function} cb - callback(error, data)
* @param {array} data - optional field providing already fetched data in
* order of dataPoints mentioned for each route in lib/backbeat/routes.js
* @return {undefined}
*/
getAllMetrics(details, cb, data) {
this._getData(details, data, (err, res) => {
if (err && err.type) {
this._logger.error('error getting metric: all', {
origin: err.method,
method: 'Metrics.getAllMetrics',
});
return cb(err.type.customizeDescription(err.message));
}
if (err || res.length !== details.dataPoints.length) {
this._logger.error('error getting metrics: all', {
method: 'Metrics.getAllMetrics',
});
return cb(errors.InternalError);
}
// NOTE: Edited to fit failed metrics
const failMetricsDetails = Object.assign({}, details,
{ dataPoints: new Array(2) });
// res = [ ops, ops_done, ops_fail, bytes, bytes_done, bytes_fail,
// opsPending, bytesPending ]
return async.parallel([
done => this.getBacklog({ dataPoints: new Array(2) }, done,
[res[6], res[7]]),
done => this.getCompletions({ dataPoints: new Array(2) }, done,
[res[1], res[4]]),
done => this.getFailedMetrics(failMetricsDetails, done,
[res[2], res[5]]),
done => this.getThroughput({ dataPoints: new Array(2) }, done,
[res[1], res[4]]),
done => this.getPending({ dataPoints: new Array(2) }, done,
[res[6], res[7]]),
], (err, results) => {
if (err) {
this._logger.error('error getting metrics: all', {
method: 'Metrics.getAllMetrics',
});
return cb(errors.InternalError);
}
const store = Object.assign({}, ...results);
return cb(null, store);
});
});
}
/**
* Close redis client
* @param {function} cb - callback(error, data)
* @return {undefined}
*/
disconnect(cb) {
return this._redisClient.disconnect(cb);
}
/**
* Retrieve the list of redis client connections
* @param {function} cb - callback(error, data)
* @return {undefined}
*/
listClients(cb) {
return this._redisClient.listClients(cb);
}
}
module.exports = Metrics;
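getThroughput averages the interval counters over the observed uptime and, once a full 15-minute window is available, adds a fraction of the extra (oldest) interval weighted by how much of the newest interval has elapsed. A worked sketch of that adjustment, with illustrative numbers only:

    const INTERVAL = 300;               // seconds per interval
    const uptime = 900;                 // THROUGHPUT_EXPIRY reached
    const requests = [120, 90, 60, 30]; // newest..oldest, incl. the extra interval
    const diff = 100;                   // seconds elapsed in the newest interval

    let total = requests.slice(0, 3).reduce((acc, n) => acc + n, 0); // 270
    // Count only the slice of the extra interval still inside the window.
    total += ((INTERVAL - diff) / INTERVAL) * requests[3];           // + 20
    const opsPerSec = total / uptime;                                // ≈ 0.32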

View File

@ -1,167 +0,0 @@
/*
This file contains Backbeat API routes and route details
*/
/**
* The metrics route model.
* @param {Object} redisKeys - The Redis keys used for Backbeat metrics
* @param {Array} allLocations - The list of replication location names
* @return {Array} The array of route objects
*/
function routes(redisKeys, allLocations) {
return [
// Route: /_/healthcheck
{
httpMethod: 'GET',
category: 'healthcheck',
type: 'basic',
method: 'getHealthcheck',
extensions: {},
},
// Route: /_/metrics/crr/<location>/pending
{
httpMethod: 'GET',
category: 'metrics',
type: 'pending',
extensions: { crr: [...allLocations, 'all'] },
method: 'getPending',
dataPoints: [redisKeys.opsPending, redisKeys.bytesPending],
},
// Route: /_/metrics/crr/<location>/backlog
{
httpMethod: 'GET',
category: 'metrics',
type: 'backlog',
extensions: { crr: [...allLocations, 'all'] },
method: 'getBacklog',
dataPoints: [redisKeys.opsPending, redisKeys.bytesPending],
},
// Route: /_/metrics/crr/<location>/completions
{
httpMethod: 'GET',
category: 'metrics',
type: 'completions',
extensions: { crr: [...allLocations, 'all'] },
method: 'getCompletions',
dataPoints: [redisKeys.opsDone, redisKeys.bytesDone],
},
// Route: /_/metrics/crr/<location>/failures
{
httpMethod: 'GET',
category: 'metrics',
type: 'failures',
extensions: { crr: [...allLocations, 'all'] },
method: 'getFailedMetrics',
dataPoints: [redisKeys.opsFail, redisKeys.bytesFail],
},
// Route: /_/metrics/crr/<location>/throughput
{
httpMethod: 'GET',
category: 'metrics',
type: 'throughput',
extensions: { crr: [...allLocations, 'all'] },
method: 'getThroughput',
dataPoints: [redisKeys.opsDone, redisKeys.bytesDone],
},
// Route: /_/metrics/crr/<location>/all
{
httpMethod: 'GET',
category: 'metrics',
type: 'all',
extensions: { crr: [...allLocations, 'all'] },
method: 'getAllMetrics',
dataPoints: [redisKeys.ops, redisKeys.opsDone, redisKeys.opsFail,
redisKeys.bytes, redisKeys.bytesDone, redisKeys.bytesFail,
redisKeys.opsPending, redisKeys.bytesPending],
},
// Route: /_/metrics/crr/<site>/progress/<bucket>/<key>
{
httpMethod: 'GET',
category: 'metrics',
type: 'progress',
level: 'object',
extensions: { crr: [...allLocations] },
method: 'getObjectProgress',
dataPoints: [redisKeys.objectBytes, redisKeys.objectBytesDone],
},
// Route: /_/metrics/crr/<site>/throughput/<bucket>/<key>
{
httpMethod: 'GET',
category: 'metrics',
type: 'throughput',
level: 'object',
extensions: { crr: [...allLocations] },
method: 'getObjectThroughput',
dataPoints: [redisKeys.objectBytesDone],
},
// Route: /_/crr/failed?site=<site>&marker=<marker>
{
httpMethod: 'GET',
type: 'all',
extensions: { crr: ['failed'] },
method: 'getSiteFailedCRR',
},
// Route: /_/crr/failed/<bucket>/<key>/<versionId>
{
httpMethod: 'GET',
type: 'specific',
extensions: { crr: ['failed'] },
method: 'getFailedCRR',
},
// Route: /_/crr/failed
{
httpMethod: 'POST',
type: 'all',
extensions: { crr: ['failed'] },
method: 'retryFailedCRR',
},
// Route: /_/monitoring/metrics
{
httpMethod: 'GET',
category: 'monitoring',
type: 'metrics',
extensions: {},
method: 'monitoringHandler',
},
// Route: /_/crr/pause/<location>
// Where <location> is an optional field
{
httpMethod: 'POST',
type: 'pause',
extensions: { crr: [...allLocations, 'all'] },
method: 'pauseCRRService',
},
// Route: /_/crr/resume/<location>
// Route: /_/crr/resume/<location>/schedule
// Where <location> is an optional field unless "schedule" route
{
httpMethod: 'POST',
type: 'resume',
extensions: { crr: [...allLocations, 'all'] },
method: 'resumeCRRService',
},
{
httpMethod: 'DELETE',
type: 'resume',
extensions: { crr: [...allLocations, 'all'] },
method: 'deleteScheduledResumeService',
},
// Route: /_/crr/resume/<location>
{
httpMethod: 'GET',
type: 'resume',
extensions: { crr: [...allLocations, 'all'] },
method: 'getResumeCRRSchedule',
},
// Route: /_/crr/status/<location>
// Where <location> is an optional field
{
httpMethod: 'GET',
type: 'status',
extensions: { crr: [...allLocations, 'all'] },
method: 'getCRRServiceStatus',
},
];
}
module.exports = routes;
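Each route's dataPoints name the Redis counters that the Metrics class queries; for a site-level request the lookup key is built as `<site>:<op>` (see _queryStats above). A short sketch, with placeholder Redis key names:

    // redisKeys values are placeholders; the real names come from Backbeat's config.
    const redisKeys = { opsDone: 'bb:crr:ops_done', bytesDone: 'bb:crr:bytes_done' };
    const completions = routes(redisKeys, ['aws-location', 'azure-location'])
        .find(r => r.category === 'metrics' && r.type === 'completions');
    // completions.dataPoints === ['bb:crr:ops_done', 'bb:crr:bytes_done'],
    // which _queryStats turns into keys such as 'aws-location:bb:crr:ops_done'.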

View File

@ -72,10 +72,4 @@ module.exports = {
permittedCapitalizedBuckets: { permittedCapitalizedBuckets: {
METADATA: true, METADATA: true,
}, },
// Setting a lower object key limit to account for:
// - Mongo key limit of 1012 bytes
// - Version ID in Mongo Key if versioned of 33
// - Max bucket name length if bucket match false of 63
// - Extra prefix slash for bucket prefix if bucket match of 1
objectKeyByteLimit: 915,
}; };

View File

@ -1,13 +0,0 @@
function reshapeExceptionError(error) {
const { message, code, stack, name } = error;
return {
message,
code,
stack,
name,
};
}
module.exports = {
reshapeExceptionError,
};

View File

@ -22,28 +22,6 @@ class RedisClient {
return this; return this;
} }
/**
* scan a pattern and return matching keys
* @param {string} pattern - string pattern to match with all existing keys
* @param {number} [count=10] - scan count
* @param {callback} cb - callback (error, result)
* @return {undefined}
*/
scan(pattern, count = 10, cb) {
const params = { match: pattern, count };
const keys = [];
const stream = this._client.scanStream(params);
stream.on('data', resultKeys => {
for (let i = 0; i < resultKeys.length; i++) {
keys.push(resultKeys[i]);
}
});
stream.on('end', () => {
cb(null, keys);
});
}
/** /**
* increment value of a key by 1 and set a ttl * increment value of a key by 1 and set a ttl
* @param {string} key - key holding the value * @param {string} key - key holding the value
@ -57,17 +35,6 @@ class RedisClient {
.exec(cb); .exec(cb);
} }
/**
* increment value of a key by a given amount
* @param {string} key - key holding the value
* @param {number} amount - amount to increase by
* @param {callback} cb - callback
* @return {undefined}
*/
incrby(key, amount, cb) {
return this._client.incrby(key, amount, cb);
}
/** /**
* increment value of a key by a given amount and set a ttl * increment value of a key by a given amount and set a ttl
* @param {string} key - key holding the value * @param {string} key - key holding the value
@ -83,24 +50,13 @@ class RedisClient {
} }
/** /**
* decrement value of a key by a given amount * execute a batch of commands
* @param {string} key - key holding the value * @param {string[]} cmds - list of commands
* @param {number} amount - amount to decrement by
* @param {callback} cb - callback * @param {callback} cb - callback
* @return {undefined} * @return {undefined}
*/ */
decrby(key, amount, cb) { batch(cmds, cb) {
return this._client.decrby(key, amount, cb); return this._client.pipeline(cmds).exec(cb);
}
/**
* get value stored at key
* @param {string} key - key holding the value
* @param {callback} cb - callback
* @return {undefined}
*/
get(key, cb) {
return this._client.get(key, cb);
} }
/** /**
@ -115,16 +71,6 @@ class RedisClient {
return this._client.exists(key, cb); return this._client.exists(key, cb);
} }
/**
* execute a batch of commands
* @param {string[]} cmds - list of commands
* @param {callback} cb - callback
* @return {undefined}
*/
batch(cmds, cb) {
return this._client.pipeline(cmds).exec(cb);
}
/** /**
* Add a value and its score to a sorted set. If no sorted set exists, this * Add a value and its score to a sorted set. If no sorted set exists, this
* will create a new one for the given key. * will create a new one for the given key.
@ -204,27 +150,9 @@ class RedisClient {
return this._client.zrangebyscore(key, min, max, cb); return this._client.zrangebyscore(key, min, max, cb);
} }
/**
* get TTL or expiration in seconds
* @param {string} key - name of key
* @param {function} cb - callback
* @return {undefined}
*/
ttl(key, cb) {
return this._client.ttl(key, cb);
}
clear(cb) { clear(cb) {
return this._client.flushdb(cb); return this._client.flushdb(cb);
} }
disconnect(cb) {
return this._client.quit(cb);
}
listClients(cb) {
return this._client.client('list', cb);
}
} }
module.exports = RedisClient; module.exports = RedisClient;
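batch forwards a list of commands to the underlying client's pipeline().exec(), so the callback receives one [error, result] pair per command, which is the shape the stats code below expects. A usage sketch, assuming an already constructed RedisClient instance and existing keys:

    client.batch([
        ['get', 'site1:requests:1577873100000'],
        ['get', 'site1:500s:1577873100000'],
    ], (err, results) => {
        // results resembles [[null, '12'], [null, '0']]:
        // one [commandError, value] entry per pipelined command.
    });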

View File

@ -41,11 +41,11 @@ class StatsClient {
/** /**
* build redis key to get total number of occurrences on the server * build redis key to get total number of occurrences on the server
* @param {string} name - key name identifier * @param {string} name - key name identifier
* @param {Date} date - Date instance * @param {object} d - Date instance
* @return {string} key - key for redis * @return {string} key - key for redis
*/ */
buildKey(name, date) { _buildKey(name, d) {
return `${name}:${this._normalizeTimestamp(date)}`; return `${name}:${this._normalizeTimestamp(d)}`;
} }
/** /**
@ -85,35 +85,11 @@ class StatsClient {
amount = (typeof incr === 'number') ? incr : 1; amount = (typeof incr === 'number') ? incr : 1;
} }
const key = this.buildKey(`${id}:requests`, new Date()); const key = this._buildKey(`${id}:requests`, new Date());
return this._redis.incrbyEx(key, amount, this._expiry, callback); return this._redis.incrbyEx(key, amount, this._expiry, callback);
} }
/**
* Increment the given key by the given value.
* @param {String} key - The Redis key to increment
* @param {Number} incr - The value to increment by
* @param {function} [cb] - callback
* @return {undefined}
*/
incrementKey(key, incr, cb) {
const callback = cb || this._noop;
return this._redis.incrby(key, incr, callback);
}
/**
* Decrement the given key by the given value.
* @param {String} key - The Redis key to decrement
* @param {Number} decr - The value to decrement by
* @param {function} [cb] - callback
* @return {undefined}
*/
decrementKey(key, decr, cb) {
const callback = cb || this._noop;
return this._redis.decrby(key, decr, callback);
}
/** /**
* report/record a request that ended up being a 500 on the server * report/record a request that ended up being a 500 on the server
* @param {string} id - service identifier * @param {string} id - service identifier
@ -125,54 +101,10 @@ class StatsClient {
return undefined; return undefined;
} }
const callback = cb || this._noop; const callback = cb || this._noop;
const key = this.buildKey(`${id}:500s`, new Date()); const key = this._buildKey(`${id}:500s`, new Date());
return this._redis.incrEx(key, this._expiry, callback); return this._redis.incrEx(key, this._expiry, callback);
} }
/**
* wrapper on `getStats` that handles a list of keys
* @param {object} log - Werelogs request logger
* @param {array} ids - service identifiers
* @param {callback} cb - callback to call with the err/result
* @return {undefined}
*/
getAllStats(log, ids, cb) {
if (!this._redis) {
return cb(null, {});
}
const statsRes = {
'requests': 0,
'500s': 0,
'sampleDuration': this._expiry,
};
let requests = 0;
let errors = 0;
// for now set concurrency to default of 10
return async.eachLimit(ids, 10, (id, done) => {
this.getStats(log, id, (err, res) => {
if (err) {
return done(err);
}
requests += res.requests;
errors += res['500s'];
return done();
});
}, error => {
if (error) {
log.error('error getting stats', {
error,
method: 'StatsClient.getAllStats',
});
return cb(null, statsRes);
}
statsRes.requests = requests;
statsRes['500s'] = errors;
return cb(null, statsRes);
});
}
/** /**
* get stats for the last x seconds, x being the sampling duration * get stats for the last x seconds, x being the sampling duration
* @param {object} log - Werelogs request logger * @param {object} log - Werelogs request logger
@ -189,8 +121,8 @@ class StatsClient {
const reqsKeys = []; const reqsKeys = [];
const req500sKeys = []; const req500sKeys = [];
for (let i = 0; i < totalKeys; i++) { for (let i = 0; i < totalKeys; i++) {
reqsKeys.push(['get', this.buildKey(`${id}:requests`, d)]); reqsKeys.push(['get', this._buildKey(`${id}:requests`, d)]);
req500sKeys.push(['get', this.buildKey(`${id}:500s`, d)]); req500sKeys.push(['get', this._buildKey(`${id}:500s`, d)]);
this._setPrevInterval(d); this._setPrevInterval(d);
} }
return async.parallel([ return async.parallel([
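_buildKey pins a counter to its sampling interval by normalizing the timestamp before appending it to the key, so every increment inside the same interval lands on the same Redis key. A minimal sketch of the key layout, assuming the 300-second interval used elsewhere in this diff:

    const interval = 300; // seconds
    const normalize = d => {
        const m = d.getMinutes();
        return d.setMinutes(m - m % Math.floor(interval / 60), 0, 0);
    };
    const buildKey = (name, d) => `${name}:${normalize(d)}`;
    // All calls made between, say, 10:05:00 and 10:09:59 produce the same
    // key, e.g. 'site1:requests:1577873100000' (timestamp illustrative).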

View File

@ -1,7 +1,4 @@
const async = require('async');
const StatsClient = require('./StatsClient'); const StatsClient = require('./StatsClient');
/** /**
* @class StatsModel * @class StatsModel
* *
@ -9,141 +6,6 @@ const StatsClient = require('./StatsClient');
* rather than by seconds * rather than by seconds
*/ */
class StatsModel extends StatsClient { class StatsModel extends StatsClient {
/**
* Utility method to convert 2d array rows to columns, and vice versa
* See also: https://docs.ruby-lang.org/en/2.0.0/Array.html#method-i-zip
* @param {array} arrays - 2d array of integers
* @return {array} converted array
*/
_zip(arrays) {
if (arrays.length > 0 && arrays.every(a => Array.isArray(a))) {
return arrays[0].map((_, i) => arrays.map(a => a[i]));
}
return [];
}
/**
* normalize to the nearest interval
* @param {object} d - Date instance
* @return {number} timestamp - normalized to the nearest interval
*/
_normalizeTimestamp(d) {
const m = d.getMinutes();
return d.setMinutes(m - m % (Math.floor(this._interval / 60)), 0, 0);
}
/**
* override the method to get the count as an array of integers separated
* by each interval
* typical input looks like [[null, '1'], [null, '2'], [null, null]...]
* @param {array} arr - each index contains the result of each batch command
* where index 0 signifies the error and index 1 contains the result
* @return {array} array of integers, ordered from most recent interval to
* oldest interval with length of (expiry / interval)
*/
_getCount(arr) {
const size = Math.floor(this._expiry / this._interval);
const array = arr.reduce((store, i) => {
let num = parseInt(i[1], 10);
num = Number.isNaN(num) ? 0 : num;
store.push(num);
return store;
}, []);
if (array.length < size) {
array.push(...Array(size - array.length).fill(0));
}
return array;
}
/**
* wrapper on `getStats` that handles a list of keys
* override the method to reduce the returned 2d array from `_getCount`
* @param {object} log - Werelogs request logger
* @param {array} ids - service identifiers
* @param {callback} cb - callback to call with the err/result
* @return {undefined}
*/
getAllStats(log, ids, cb) {
if (!this._redis) {
return cb(null, {});
}
const size = Math.floor(this._expiry / this._interval);
const statsRes = {
'requests': Array(size).fill(0),
'500s': Array(size).fill(0),
'sampleDuration': this._expiry,
};
const requests = [];
const errors = [];
if (ids.length === 0) {
return cb(null, statsRes);
}
// for now set concurrency to default of 10
return async.eachLimit(ids, 10, (id, done) => {
this.getStats(log, id, (err, res) => {
if (err) {
return done(err);
}
requests.push(res.requests);
errors.push(res['500s']);
return done();
});
}, error => {
if (error) {
log.error('error getting stats', {
error,
method: 'StatsModel.getAllStats',
});
return cb(null, statsRes);
}
statsRes.requests = this._zip(requests).map(arr =>
arr.reduce((acc, i) => acc + i, 0));
statsRes['500s'] = this._zip(errors).map(arr =>
arr.reduce((acc, i) => acc + i, 0));
return cb(null, statsRes);
});
}
/**
* Handles getting a list of global keys.
* @param {array} ids - Service identifiers
* @param {object} log - Werelogs request logger
* @param {function} cb - Callback
* @return {undefined}
*/
getAllGlobalStats(ids, log, cb) {
const reqsKeys = ids.map(key => (['get', key]));
return this._redis.batch(reqsKeys, (err, res) => {
const statsRes = { requests: 0 };
if (err) {
log.error('error getting metrics', {
error: err,
method: 'StatsClient.getAllGlobalStats',
});
return cb(null, statsRes);
}
statsRes.requests = res.reduce((sum, curr) => {
const [cmdErr, val] = curr;
if (cmdErr) {
// Log any individual request errors from the batch request.
log.error('error getting metrics', {
error: cmdErr,
method: 'StatsClient.getAllGlobalStats',
});
}
return sum + (Number.parseInt(val, 10) || 0);
}, 0);
return cb(null, statsRes);
});
}
/** /**
* normalize date timestamp to the nearest hour * normalize date timestamp to the nearest hour
* @param {Date} d - Date instance * @param {Date} d - Date instance
@ -162,6 +24,34 @@ class StatsModel extends StatsClient {
return d.setHours(d.getHours() - 1); return d.setHours(d.getHours() - 1);
} }
/**
* normalize to the nearest interval
* @param {object} d - Date instance
* @return {number} timestamp - normalized to the nearest interval
*/
_normalizeTimestamp(d) {
const m = d.getMinutes();
return d.setMinutes(m - m % (Math.floor(this._interval / 60)), 0, 0);
}
/**
* override the method to get the result as an array of integers separated
* by each interval
* typical input looks like [[null, '1'], [null, '2'], [null, null]...]
* @param {array} arr - each index contains the result of each batch command
* where index 0 signifies the error and index 1 contains the result
* @return {array} array of integers, ordered from most recent interval to
* oldest interval
*/
_getCount(arr) {
return arr.reduce((store, i) => {
let num = parseInt(i[1], 10);
num = Number.isNaN(num) ? 0 : num;
store.push(num);
return store;
}, []);
}
/** /**
* get list of sorted set key timestamps * get list of sorted set key timestamps
* @param {number} epoch - epoch time * @param {number} epoch - epoch time
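The removed getAllStats override combined per-interval counts from several identifiers by first transposing the 2d result array with _zip and then summing each column. An illustration of the transposition:

    const zip = arrays => (arrays.length > 0 && arrays.every(Array.isArray)
        ? arrays[0].map((_, i) => arrays.map(a => a[i]))
        : []);
    zip([[1, 2, 3], [4, 5, 6]]);                         // [[1, 4], [2, 5], [3, 6]]
    zip([[1, 2, 3], [4, 5, 6]])
        .map(col => col.reduce((acc, n) => acc + n, 0)); // [5, 7, 9]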

View File

@ -1,13 +1,10 @@
const assert = require('assert'); const assert = require('assert');
const uuid = require('uuid/v4');
const { WebsiteConfiguration } = require('./WebsiteConfiguration'); const { WebsiteConfiguration } = require('./WebsiteConfiguration');
const ReplicationConfiguration = require('./ReplicationConfiguration'); const ReplicationConfiguration = require('./ReplicationConfiguration');
const LifecycleConfiguration = require('./LifecycleConfiguration'); const LifecycleConfiguration = require('./LifecycleConfiguration');
// WHEN UPDATING THIS NUMBER, UPDATE MODELVERSION.MD CHANGELOG // WHEN UPDATING THIS NUMBER, UPDATE MODELVERSION.MD CHANGELOG
// MODELVERSION.MD can be found in S3 repo: lib/metadata/ModelVersion.md const modelVersion = 6;
const modelVersion = 9;
class BucketInfo { class BucketInfo {
/** /**
@ -50,17 +47,12 @@ class BucketInfo {
* @param {string[]} [cors[].exposeHeaders] - headers expose to applications * @param {string[]} [cors[].exposeHeaders] - headers expose to applications
* @param {object} [replicationConfiguration] - replication configuration * @param {object} [replicationConfiguration] - replication configuration
* @param {object} [lifecycleConfiguration] - lifecycle configuration * @param {object} [lifecycleConfiguration] - lifecycle configuration
* @param {string} [uid] - unique identifier for the bucket, necessary
* addition for use with lifecycle operations
* @param {string} readLocationConstraint - readLocationConstraint for bucket
* @param {boolean} [isNFS] - whether the bucket is on NFS
*/ */
constructor(name, owner, ownerDisplayName, creationDate, constructor(name, owner, ownerDisplayName, creationDate,
mdBucketModelVersion, acl, transient, deleted, mdBucketModelVersion, acl, transient, deleted,
serverSideEncryption, versioningConfiguration, serverSideEncryption, versioningConfiguration,
locationConstraint, websiteConfiguration, cors, locationConstraint, websiteConfiguration, cors,
replicationConfiguration, lifecycleConfiguration, uid, replicationConfiguration, lifecycleConfiguration) {
readLocationConstraint, isNFS) {
assert.strictEqual(typeof name, 'string'); assert.strictEqual(typeof name, 'string');
assert.strictEqual(typeof owner, 'string'); assert.strictEqual(typeof owner, 'string');
assert.strictEqual(typeof ownerDisplayName, 'string'); assert.strictEqual(typeof ownerDisplayName, 'string');
@ -98,9 +90,6 @@ class BucketInfo {
if (locationConstraint) { if (locationConstraint) {
assert.strictEqual(typeof locationConstraint, 'string'); assert.strictEqual(typeof locationConstraint, 'string');
} }
if (readLocationConstraint) {
assert.strictEqual(typeof readLocationConstraint, 'string');
}
if (websiteConfiguration) { if (websiteConfiguration) {
assert(websiteConfiguration instanceof WebsiteConfiguration); assert(websiteConfiguration instanceof WebsiteConfiguration);
const { indexDocument, errorDocument, redirectAllRequestsTo, const { indexDocument, errorDocument, redirectAllRequestsTo,
@ -123,10 +112,6 @@ class BucketInfo {
if (lifecycleConfiguration) { if (lifecycleConfiguration) {
LifecycleConfiguration.validateConfig(lifecycleConfiguration); LifecycleConfiguration.validateConfig(lifecycleConfiguration);
} }
if (uid) {
assert.strictEqual(typeof uid, 'string');
assert.strictEqual(uid.length, 36);
}
const aclInstance = acl || { const aclInstance = acl || {
Canned: 'private', Canned: 'private',
FULL_CONTROL: [], FULL_CONTROL: [],
@ -148,13 +133,10 @@ class BucketInfo {
this._serverSideEncryption = serverSideEncryption || null; this._serverSideEncryption = serverSideEncryption || null;
this._versioningConfiguration = versioningConfiguration || null; this._versioningConfiguration = versioningConfiguration || null;
this._locationConstraint = locationConstraint || null; this._locationConstraint = locationConstraint || null;
this._readLocationConstraint = readLocationConstraint || null;
this._websiteConfiguration = websiteConfiguration || null; this._websiteConfiguration = websiteConfiguration || null;
this._replicationConfiguration = replicationConfiguration || null; this._replicationConfiguration = replicationConfiguration || null;
this._cors = cors || null; this._cors = cors || null;
this._lifecycleConfiguration = lifecycleConfiguration || null; this._lifecycleConfiguration = lifecycleConfiguration || null;
this._uid = uid || uuid();
this._isNFS = isNFS || null;
return this; return this;
} }
/** /**
@ -174,13 +156,10 @@ class BucketInfo {
serverSideEncryption: this._serverSideEncryption, serverSideEncryption: this._serverSideEncryption,
versioningConfiguration: this._versioningConfiguration, versioningConfiguration: this._versioningConfiguration,
locationConstraint: this._locationConstraint, locationConstraint: this._locationConstraint,
readLocationConstraint: this._readLocationConstraint,
websiteConfiguration: undefined, websiteConfiguration: undefined,
cors: this._cors, cors: this._cors,
replicationConfiguration: this._replicationConfiguration, replicationConfiguration: this._replicationConfiguration,
lifecycleConfiguration: this._lifecycleConfiguration, lifecycleConfiguration: this._lifecycleConfiguration,
uid: this._uid,
isNFS: this._isNFS,
}; };
if (this._websiteConfiguration) { if (this._websiteConfiguration) {
bucketInfos.websiteConfiguration = bucketInfos.websiteConfiguration =
@ -201,8 +180,7 @@ class BucketInfo {
obj.creationDate, obj.mdBucketModelVersion, obj.acl, obj.creationDate, obj.mdBucketModelVersion, obj.acl,
obj.transient, obj.deleted, obj.serverSideEncryption, obj.transient, obj.deleted, obj.serverSideEncryption,
obj.versioningConfiguration, obj.locationConstraint, websiteConfig, obj.versioningConfiguration, obj.locationConstraint, websiteConfig,
obj.cors, obj.replicationConfiguration, obj.lifecycleConfiguration, obj.cors, obj.replicationConfiguration, obj.lifecycleConfiguration);
obj.uid, obj.readLocationConstraint, obj.isNFS);
} }
/** /**
@ -225,8 +203,7 @@ class BucketInfo {
data._transient, data._deleted, data._serverSideEncryption, data._transient, data._deleted, data._serverSideEncryption,
data._versioningConfiguration, data._locationConstraint, data._versioningConfiguration, data._locationConstraint,
data._websiteConfiguration, data._cors, data._websiteConfiguration, data._cors,
data._replicationConfiguration, data._lifecycleConfiguration, data._replicationConfiguration, data._lifecycleConfiguration);
data._uid, data._readLocationConstraint, data._isNFS);
} }
/** /**
@ -472,17 +449,6 @@ class BucketInfo {
return this._locationConstraint; return this._locationConstraint;
} }
/**
* Get read location constraint.
* @return {string} - bucket read location constraint
*/
getReadLocationConstraint() {
if (this._readLocationConstraint) {
return this._readLocationConstraint;
}
return this._locationConstraint;
}
/** /**
* Set Bucket model version * Set Bucket model version
* *
@ -555,29 +521,6 @@ class BucketInfo {
return this._versioningConfiguration && return this._versioningConfiguration &&
this._versioningConfiguration.Status === 'Enabled'; this._versioningConfiguration.Status === 'Enabled';
} }
/**
* Get unique id of bucket.
* @return {string} - unique id
*/
getUid() {
return this._uid;
}
/**
* Check if the bucket is an NFS bucket.
* @return {boolean} - Whether the bucket is NFS or not
*/
isNFS() {
return this._isNFS;
}
/**
* Set whether the bucket is an NFS bucket.
* @param {boolean} isNFS - Whether the bucket is NFS or not
* @return {BucketInfo} - bucket info instance
*/
setIsNFS(isNFS) {
this._isNFS = isNFS;
return this;
}
} }
module.exports = BucketInfo; module.exports = BucketInfo;

View File

@ -269,7 +269,7 @@ class LifecycleConfiguration {
return filterObj; return filterObj;
} }
if (filter.Tag) { if (filter.Tag) {
const tagObj = this._parseTags(filter.Tag); const tagObj = this._parseTags(filter.Tag[0]);
if (tagObj.error) { if (tagObj.error) {
filterObj.error = tagObj.error; filterObj.error = tagObj.error;
return filterObj; return filterObj;
@ -287,7 +287,7 @@ class LifecycleConfiguration {
if (andF.Prefix && andF.Prefix.length >= 1) { if (andF.Prefix && andF.Prefix.length >= 1) {
filterObj.rulePrefix = andF.Prefix.pop(); filterObj.rulePrefix = andF.Prefix.pop();
} }
const tagObj = this._parseTags(andF.Tag); const tagObj = this._parseTags(andF.Tag[0]);
if (tagObj.error) { if (tagObj.error) {
filterObj.error = tagObj.error; filterObj.error = tagObj.error;
return filterObj; return filterObj;
@ -320,28 +320,31 @@ class LifecycleConfiguration {
// reset _tagKeys to empty because keys cannot overlap within a rule, // reset _tagKeys to empty because keys cannot overlap within a rule,
// but different rules can have the same tag keys // but different rules can have the same tag keys
this._tagKeys = []; this._tagKeys = [];
for (let i = 0; i < tags.length; i++) { if (!tags.Key || !tags.Value) {
if (!tags[i].Key || !tags[i].Value) { tagObj.error = errors.MissingRequiredParameter.customizeDescription(
tagObj.error =
errors.MissingRequiredParameter.customizeDescription(
'Tag XML does not contain both Key and Value'); 'Tag XML does not contain both Key and Value');
break; return tagObj;
} }
if (tags.Key.length !== tags.Value.length) {
if (tags[i].Key[0].length < 1 || tags[i].Key[0].length > 128) { tagObj.error = errors.MalformedXML.customizeDescription(
'Tag XML should contain same number of Keys and Values');
return tagObj;
}
for (let i = 0; i < tags.Key.length; i++) {
if (tags.Key[i].length < 1 || tags.Key[i].length > 128) {
tagObj.error = errors.InvalidRequest.customizeDescription( tagObj.error = errors.InvalidRequest.customizeDescription(
'Tag Key must be a length between 1 and 128 char'); 'Tag Key must be a length between 1 and 128 char');
break; break;
} }
if (this._tagKeys.includes(tags[i].Key[0])) { if (this._tagKeys.includes(tags.Key[i])) {
tagObj.error = errors.InvalidRequest.customizeDescription( tagObj.error = errors.InvalidRequest.customizeDescription(
'Tag Keys must be unique'); 'Tag Keys must be unique');
break; break;
} }
this._tagKeys.push(tags[i].Key[0]); this._tagKeys.push(tags.Key[i]);
const tag = { const tag = {
key: tags[i].Key[0], key: tags.Key[i],
val: tags[i].Value[0], val: tags.Value[i],
}; };
tagObj.tags.push(tag); tagObj.tags.push(tag);
} }
@ -674,12 +677,13 @@ class LifecycleConfiguration {
const Prefix = rulePrefix ? `<Prefix>${rulePrefix}</Prefix>` : ''; const Prefix = rulePrefix ? `<Prefix>${rulePrefix}</Prefix>` : '';
let tagXML = ''; let tagXML = '';
if (tags) { if (tags) {
tagXML = tags.map(t => { const keysVals = tags.map(t => {
const { key, val } = t; const { key, val } = t;
const Tag = `<Tag><Key>${key}</Key>` + const Tag = `<Key>${key}</Key>` +
`<Value>${val}</Value></Tag>`; `<Value>${val}</Value>`;
return Tag; return Tag;
}).join(''); }).join('');
tagXML = `<Tag>${keysVals}</Tag>`;
} }
let Filter; let Filter;
if (rulePrefix && !tags) { if (rulePrefix && !tags) {
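The rewritten _parseTags receives the single parsed <Tag> element (hence filter.Tag[0]) and pairs Key[i] with Value[i], instead of iterating over an array of one-key/one-value tag objects. A sketch of the two shapes implied by the code above, assuming an xml2js-style parser (arrays for repeated elements):

    const oldShape = [                    // one object per <Tag> element
        { Key: ['color'], Value: ['blue'] },
        { Key: ['size'], Value: ['large'] },
    ];
    const newShape = {                    // single <Tag> element, index-aligned arrays
        Key: ['color', 'size'],
        Value: ['blue', 'large'],
    };
    // The new XML builder mirrors newShape:
    // <Tag><Key>color</Key><Value>blue</Value><Key>size</Key><Value>large</Value></Tag>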

View File

@ -120,7 +120,6 @@ class ObjectMD {
role: '', role: '',
storageType: '', storageType: '',
dataStoreVersionId: '', dataStoreVersionId: '',
isNFS: null,
}, },
'dataStoreName': '', 'dataStoreName': '',
}; };
@ -649,19 +648,6 @@ class ObjectMD {
return this._data.isDeleteMarker; return this._data.isDeleteMarker;
} }
/**
* Get if the object is a multipart upload (MPU)
*
* The function checks the "content-md5" field: if it contains a
* dash ('-') it is a MPU, as the content-md5 string ends with
* "-[nbparts]" for MPUs.
*
* @return {boolean} Whether object is a multipart upload
*/
isMultipartUpload() {
return this.getContentMd5().includes('-');
}
/** /**
* Set metadata versionId value * Set metadata versionId value
* *
@ -689,11 +675,8 @@ class ObjectMD {
* @return {string} The encoded object versionId * @return {string} The encoded object versionId
*/ */
getEncodedVersionId() { getEncodedVersionId() {
if (this.getVersionId()) {
return VersionIDUtils.encode(this.getVersionId()); return VersionIDUtils.encode(this.getVersionId());
} }
return undefined;
}
/** /**
* Set tags * Set tags
@ -723,7 +706,7 @@ class ObjectMD {
*/ */
setReplicationInfo(replicationInfo) { setReplicationInfo(replicationInfo) {
const { status, backends, content, destination, storageClass, role, const { status, backends, content, destination, storageClass, role,
storageType, dataStoreVersionId, isNFS } = replicationInfo; storageType, dataStoreVersionId } = replicationInfo;
this._data.replicationInfo = { this._data.replicationInfo = {
status, status,
backends, backends,
@ -733,7 +716,6 @@ class ObjectMD {
role, role,
storageType: storageType || '', storageType: storageType || '',
dataStoreVersionId: dataStoreVersionId || '', dataStoreVersionId: dataStoreVersionId || '',
isNFS: isNFS || null,
}; };
return this; return this;
} }
@ -752,24 +734,6 @@ class ObjectMD {
return this; return this;
} }
/**
* Set whether the replication is occurring from an NFS bucket.
* @param {Boolean} isNFS - Whether replication from an NFS bucket
* @return {ObjectMD} itself
*/
setReplicationIsNFS(isNFS) {
this._data.replicationInfo.isNFS = isNFS;
return this;
}
/**
* Get whether the replication is occurring from an NFS bucket.
* @return {Boolean} Whether replication from an NFS bucket
*/
getReplicationIsNFS() {
return this._data.replicationInfo.isNFS;
}
setReplicationSiteStatus(site, status) { setReplicationSiteStatus(site, status) {
const backend = this._data.replicationInfo.backends const backend = this._data.replicationInfo.backends
.find(o => o.site === site); .find(o => o.site === site);
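The removed isMultipartUpload helper relied on the S3 convention that the content-md5 of a multipart upload ends with '-<number of parts>', so a simple dash check is enough. Illustrative values:

    'd41d8cd98f00b204e9800998ecf8427e'.includes('-');   // false: single PUT
    '3858f62230ac3c915f300c664312c11f-9'.includes('-'); // true: MPU with 9 parts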

View File

@ -59,7 +59,6 @@ class ReplicationConfiguration {
this._rules = null; this._rules = null;
this._prevStorageClass = null; this._prevStorageClass = null;
this._hasScalityDestination = null; this._hasScalityDestination = null;
this._preferredReadLocation = null;
} }
/** /**
@ -86,18 +85,6 @@ class ReplicationConfiguration {
return this._rules; return this._rules;
} }
/**
* The preferred read location
* @return {string|null} - The preferred read location if defined,
* otherwise null
*
* FIXME ideally we should be able to specify one preferred read
* location for each rule
*/
getPreferredReadLocation() {
return this._preferredReadLocation;
}
/** /**
* Get the replication configuration * Get the replication configuration
* @return {object} - The replication configuration * @return {object} - The replication configuration
@ -107,7 +94,6 @@ class ReplicationConfiguration {
role: this.getRole(), role: this.getRole(),
destination: this.getDestination(), destination: this.getDestination(),
rules: this.getRules(), rules: this.getRules(),
preferredReadLocation: this.getPreferredReadLocation(),
}; };
} }
@ -306,14 +292,6 @@ class ReplicationConfiguration {
return undefined; return undefined;
} }
const storageClasses = destination.StorageClass[0].split(','); const storageClasses = destination.StorageClass[0].split(',');
const prefReadIndex = storageClasses.findIndex(storageClass =>
storageClass.endsWith(':preferred_read'));
if (prefReadIndex !== -1) {
const prefRead = storageClasses[prefReadIndex].split(':')[0];
// remove :preferred_read tag from storage class name
storageClasses[prefReadIndex] = prefRead;
this._preferredReadLocation = prefRead;
}
const isValidStorageClass = storageClasses.every(storageClass => { const isValidStorageClass = storageClasses.every(storageClass => {
if (validStorageClasses.includes(storageClass)) { if (validStorageClasses.includes(storageClass)) {
this._hasScalityDestination = this._hasScalityDestination =
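A short sketch of the ':preferred_read' convention handled by the removed block, assuming an illustrative StorageClass value of 'us-east-2:preferred_read,azure-backend':

const storageClasses = 'us-east-2:preferred_read,azure-backend'.split(',');
const prefReadIndex = storageClasses.findIndex(sc => sc.endsWith(':preferred_read'));
let preferredReadLocation = null;
if (prefReadIndex !== -1) {
    preferredReadLocation = storageClasses[prefReadIndex].split(':')[0];
    storageClasses[prefReadIndex] = preferredReadLocation; // strip the :preferred_read tag
}
// preferredReadLocation === 'us-east-2'
// storageClasses === ['us-east-2', 'azure-backend']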

View File

@ -352,8 +352,6 @@ class Server {
error: err.stack || err, error: err.stack || err,
address: sock.address(), address: sock.address(),
}); });
// socket is not systematically destroyed
sock.destroy();
} }
/** /**

View File

@ -1,97 +0,0 @@
const httpServer = require('../http/server');
const werelogs = require('werelogs');
const errors = require('../../errors');
function sendError(res, log, error, optMessage) {
res.writeHead(error.code);
let message;
if (optMessage) {
message = optMessage;
} else {
message = error.description || '';
}
log.debug('sending back error response', { httpCode: error.code,
errorType: error.message,
error: message });
res.end(`${JSON.stringify({ errorType: error.message,
errorMessage: message })}\n`);
}
function sendSuccess(res, log, msg) {
res.writeHead(200);
log.debug('replying with success');
const message = msg || 'OK';
res.end(message);
}
function constructEndpoints(ns, path) {
return `/${ns}/${path}`;
}
function checkStub(log) { // eslint-disable-line
return true;
}
class HealthProbeServer extends httpServer {
constructor(params) {
const logging = new werelogs.Logger('HealthProbeServer');
super(params.port, logging);
this.logging = logging;
this.setBindAddress(params.bindAddress || 'localhost');
this._namespace = params.namespace || '_/health';
const livenessURI = constructEndpoints(this._namespace,
params.livenessURI || 'liveness');
const readinessURI = constructEndpoints(this._namespace,
params.readinessURI || 'readiness');
// hooking our request processing function by calling the
// parent's method for that
this.onRequest(this._onRequest);
this._reqHandlers = {};
this._reqHandlers[livenessURI] = this._onLiveness.bind(this);
this._reqHandlers[readinessURI] = this._onReadiness.bind(this);
this._livenessCheck = params.livenessCheck || checkStub;
this._readinessCheck = params.readinessCheck || checkStub;
}
onLiveCheck(f) {
this._livenessCheck = f;
}
onReadyCheck(f) {
this._readinessCheck = f;
}
_onRequest(req, res) {
const log = this.logging.newRequestLogger();
log.debug('request received', { method: req.method,
url: req.url });
if (req.method !== 'GET') {
sendError(res, log, errors.MethodNotAllowed);
}
if (req.url.startsWith(`/${this._namespace}`) &&
req.url in this._reqHandlers) {
this._reqHandlers[req.url](req, res, log);
} else {
sendError(res, log, errors.InvalidURI);
}
}
_onLiveness(req, res, log) {
if (this._livenessCheck(log)) {
sendSuccess(res, log);
} else {
sendError(res, log, errors.ServiceUnavailable);
}
}
_onReadiness(req, res, log) {
if (this._readinessCheck(log)) {
sendSuccess(res, log);
} else {
sendError(res, log, errors.ServiceUnavailable);
}
}
}
module.exports = HealthProbeServer;
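A usage sketch for the removed HealthProbeServer, based only on the constructor parameters visible above; the port, bind address, and check bodies are illustrative, and start() is assumed to come from the httpServer base class:

const probe = new HealthProbeServer({
    port: 8000,
    bindAddress: '0.0.0.0',
    // With the default namespace, this exposes GET /_/health/liveness
    // and GET /_/health/readiness.
    livenessCheck: log => true,
    readinessCheck: log => backendIsReady(), // hypothetical readiness predicate
});
probe.start(); // assumed to be provided by the httpServer parent class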

View File

@ -214,7 +214,7 @@ class RESTServer extends httpServer {
if (req.url.startsWith(`${constants.dataFileURL}?`)) { if (req.url.startsWith(`${constants.dataFileURL}?`)) {
const queryParam = url.parse(req.url).query; const queryParam = url.parse(req.url).query;
if (queryParam === 'diskUsage') { if (queryParam === 'diskUsage') {
return this.dataStore.getDiskUsage((err, result) => { this.dataStore.getDiskUsage((err, result) => {
if (err) { if (err) {
return sendError(res, log, err); return sendError(res, log, err);
} }

View File

@ -2,13 +2,11 @@
const Ajv = require('ajv'); const Ajv = require('ajv');
const userPolicySchema = require('./userPolicySchema'); const userPolicySchema = require('./userPolicySchema');
const resourcePolicySchema = require('./resourcePolicySchema');
const errors = require('../errors'); const errors = require('../errors');
const ajValidate = new Ajv({ allErrors: true }); const ajValidate = new Ajv({ allErrors: true });
// compiles schema to functions and caches them for all cases // compiles schema to functions and caches them for all cases
const userPolicyValidate = ajValidate.compile(userPolicySchema); const userPolicyValidate = ajValidate.compile(userPolicySchema);
const resourcePolicyValidate = ajValidate.compile(resourcePolicySchema);
const errDict = { const errDict = {
required: { required: {
@ -26,39 +24,33 @@ const errDict = {
}; };
// parse ajv errors and return early with the first relevant error // parse ajv errors and return early with the first relevant error
function _parseErrors(ajvErrors, policyType) { function _parseErrors(ajvErrors) {
let parsedErr;
if (policyType === 'user') {
// deep copy is needed as we have to assign custom error description // deep copy is needed as we have to assign custom error description
parsedErr = Object.assign({}, errors.MalformedPolicyDocument); const parsedErr = Object.assign({}, errors.MalformedPolicyDocument);
parsedErr.description = 'Syntax errors in policy.'; parsedErr.description = 'Syntax errors in policy.';
}
if (policyType === 'resource') {
parsedErr = Object.assign({}, errors.MalformedPolicy);
}
ajvErrors.some(err => { ajvErrors.some(err => {
const resource = err.dataPath; const resource = err.dataPath;
const field = err.params ? err.params.missingProperty : undefined; const field = err.params ? err.params.missingProperty : undefined;
const errType = err.keyword; const errType = err.keyword;
if (errType === 'type' && (resource === '.Statement' || if (errType === 'type' && (resource === '.Statement' ||
resource.includes('.Resource') || resource === '.Statement.Resource' ||
resource.includes('.NotResource'))) { resource === '.Statement.NotResource')) {
// skip this as this doesn't have enough error context // skip this as this doesn't have enough error context
return false; return false;
} }
if (err.keyword === 'required' && field && errDict.required[field]) { if (err.keyword === 'required' && field && errDict.required[field]) {
parsedErr.description = errDict.required[field]; parsedErr.description = errDict.required[field];
} else if (err.keyword === 'pattern' && } else if (err.keyword === 'pattern' &&
(resource.includes('.Action') || (resource === '.Statement.Action' ||
resource.includes('.NotAction'))) { resource === '.Statement.NotAction')) {
parsedErr.description = errDict.pattern.Action; parsedErr.description = errDict.pattern.Action;
} else if (err.keyword === 'pattern' && } else if (err.keyword === 'pattern' &&
(resource.includes('.Resource') || (resource === '.Statement.Resource' ||
resource.includes('.NotResource'))) { resource === '.Statement.NotResource')) {
parsedErr.description = errDict.pattern.Resource; parsedErr.description = errDict.pattern.Resource;
} else if (err.keyword === 'minItems' && } else if (err.keyword === 'minItems' &&
(resource.includes('.Resource') || (resource === '.Statement.Resource' ||
resource.includes('.NotResource'))) { resource === '.Statement.NotResource')) {
parsedErr.description = errDict.minItems.Resource; parsedErr.description = errDict.minItems.Resource;
} }
return true; return true;
@ -85,24 +77,12 @@ function _validatePolicy(type, policy) {
} }
userPolicyValidate(parseRes); userPolicyValidate(parseRes);
if (userPolicyValidate.errors) { if (userPolicyValidate.errors) {
return { error: _parseErrors(userPolicyValidate.errors, 'user'), return { error: _parseErrors(userPolicyValidate.errors),
valid: false }; valid: false };
} }
return { error: null, valid: true }; return { error: null, valid: true };
} }
if (type === 'resource') { // TODO: add support for resource policies
const parseRes = _safeJSONParse(policy);
if (parseRes instanceof Error) {
return { error: Object.assign({}, errors.MalformedPolicy),
valid: false };
}
resourcePolicyValidate(parseRes);
if (resourcePolicyValidate.errors) {
return { error: _parseErrors(resourcePolicyValidate.errors,
'resource'), valid: false };
}
return { error: null, valid: true };
}
return { error: errors.NotImplemented, valid: false }; return { error: errors.NotImplemented, valid: false };
} }
/** /**
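A rough sketch of how the error mapping above surfaces to callers, using a hypothetical malformed user policy (valid JSON but missing Statement):

const res = _validatePolicy('user', JSON.stringify({ Version: '2012-10-17' }));
// res.valid === false, and res.error is a copy of errors.MalformedPolicyDocument
// whose description is overridden from errDict when the missing property has an
// entry there, otherwise left as 'Syntax errors in policy.'.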

View File

@ -1,477 +0,0 @@
{
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"title": "AWS Bucket Policy schema.",
"description": "This schema describes a bucket policy per AWS policy grammar rules",
"definitions": {
"principalService": {
"type": "object",
"properties": {
"Service": {
"type": "string",
"enum": [
"backbeat"
]
}
},
"additionalProperties": false
},
"principalCanonicalUser": {
"type": "object",
"properties": {
"CanonicalUser": {
"type": "string",
"pattern": "^[0-9a-z]{64}$"
}
},
"additionalProperties": false
},
"principalAnonymous": {
"type": "string",
"pattern": "^\\*$"
},
"principalAWSAccountID": {
"type": "string",
"pattern": "^[0-9]{12}$"
},
"principalAWSAccountArn": {
"type": "string",
"pattern": "^arn:aws:iam::[0-9]{12}:root$"
},
"principalAWSUserArn": {
"type": "string",
"pattern": "^arn:aws:iam::[0-9]{12}:user/[\\w+=,.@ -]{1,64}$"
},
"principalAWSRoleArn": {
"type": "string",
"pattern": "^arn:aws:iam::[0-9]{12}:role/[\\w+=,.@ -]{1,64}$"
},
"principalAWSItem": {
"type": "object",
"properties": {
"AWS": {
"oneOf": [
{ "$ref": "#/definitions/principalAWSAccountID" },
{ "$ref": "#/definitions/principalAnonymous" },
{ "$ref": "#/definitions/principalAWSAccountArn" },
{ "$ref": "#/definitions/principalAWSUserArn" },
{ "$ref": "#/definitions/principalAWSRoleArn" },
{
"type": "array",
"minItems": 1,
"items": {
"$ref": "#/definitions/principalAWSAccountID"
}
},
{
"type": "array",
"minItems": 1,
"items": {
"$ref": "#/definitions/principalAWSAccountArn"
}
},
{
"type": "array",
"minItems": 1,
"items": {
"$ref": "#/definitions/principalAWSRoleArn"
}
},
{
"type": "array",
"minItems": 1,
"items": {
"$ref": "#/definitions/principalAWSUserArn"
}
}
]
}
},
"additionalProperties": false
},
"principalItem": {
"oneOf": [
{ "$ref": "#/definitions/principalAWSItem" },
{ "$ref": "#/definitions/principalAnonymous" },
{ "$ref": "#/definitions/principalService" },
{ "$ref": "#/definitions/principalCanonicalUser" }
]
},
"actionItem": {
"type": "string",
"pattern": "^[^*:]+:([^:])+|^\\*$"
},
"resourceItem": {
"type": "string",
"pattern": "^\\*|arn:(aws|scality)(:(\\*{1}|[a-z0-9\\*\\-]{2,})*?){3}:((?!\\$\\{\\}).)*?$"
},
"conditionKeys" : {
"properties": {
"aws:CurrentTime": {},
"aws:EpochTime": {},
"aws:MultiFactorAuthAge": {},
"aws:MultiFactorAuthPresent": {},
"aws:PrincipalArn": {},
"aws:PrincipalOrgId": {},
"aws:PrincipalTag/${TagKey}": {},
"aws:PrincipalType": {},
"aws:Referer": {},
"aws:RequestTag/${TagKey}": {},
"aws:RequestedRegion": {},
"aws:SecureTransport": {},
"aws:SourceAccount": {},
"aws:SourceArn": {},
"aws:SourceIp": {},
"aws:SourceVpc": {},
"aws:SourceVpce": {},
"aws:TagKeys": {},
"aws:TokenIssueTime": {},
"aws:UserAgent": {},
"aws:userid": {},
"aws:username": {},
"s3:ExistingJobOperation": {},
"s3:ExistingJobPriority": {},
"s3:ExistingObjectTag/<key>": {},
"s3:JobSuspendedCause": {},
"s3:LocationConstraint": {},
"s3:RequestJobOperation": {},
"s3:RequestJobPriority": {},
"s3:RequestObjectTag/<key>": {},
"s3:RequestObjectTagKeys": {},
"s3:VersionId": {},
"s3:authtype": {},
"s3:delimiter": {},
"s3:locationconstraint": {},
"s3:max-keys": {},
"s3:object-lock-legal-hold": {},
"s3:object-lock-mode": {},
"s3:object-lock-remaining-retention-days": {},
"s3:object-lock-retain-until-date": {},
"s3:prefix": {},
"s3:signatureage": {},
"s3:signatureversion": {},
"s3:versionid": {},
"s3:x-amz-acl": {},
"s3:x-amz-content-sha256": {},
"s3:x-amz-copy-source": {},
"s3:x-amz-grant-full-control": {},
"s3:x-amz-grant-read": {},
"s3:x-amz-grant-read-acp": {},
"s3:x-amz-grant-write": {},
"s3:x-amz-grant-write-acp": {},
"s3:x-amz-metadata-directive": {},
"s3:x-amz-server-side-encryption": {},
"s3:x-amz-server-side-encryption-aws-kms-key-id": {},
"s3:x-amz-storage-class": {},
"s3:x-amz-website-redirect-location": {}
},
"additionalProperties": false
},
"conditions": {
"type": "object",
"properties": {
"ArnEquals": {
"type": "object"
},
"ArnEqualsIfExists": {
"type": "object"
},
"ArnLike": {
"type": "object"
},
"ArnLikeIfExists": {
"type": "object"
},
"ArnNotEquals": {
"type": "object"
},
"ArnNotEqualsIfExists": {
"type": "object"
},
"ArnNotLike": {
"type": "object"
},
"ArnNotLikeIfExists": {
"type": "object"
},
"BinaryEquals": {
"type": "object"
},
"BinaryEqualsIfExists": {
"type": "object"
},
"BinaryNotEquals": {
"type": "object"
},
"BinaryNotEqualsIfExists": {
"type": "object"
},
"Bool": {
"type": "object"
},
"BoolIfExists": {
"type": "object"
},
"DateEquals": {
"type": "object"
},
"DateEqualsIfExists": {
"type": "object"
},
"DateGreaterThan": {
"type": "object"
},
"DateGreaterThanEquals": {
"type": "object"
},
"DateGreaterThanEqualsIfExists": {
"type": "object"
},
"DateGreaterThanIfExists": {
"type": "object"
},
"DateLessThan": {
"type": "object"
},
"DateLessThanEquals": {
"type": "object"
},
"DateLessThanEqualsIfExists": {
"type": "object"
},
"DateLessThanIfExists": {
"type": "object"
},
"DateNotEquals": {
"type": "object"
},
"DateNotEqualsIfExists": {
"type": "object"
},
"IpAddress": {
"type": "object"
},
"IpAddressIfExists": {
"type": "object"
},
"NotIpAddress": {
"type": "object"
},
"NotIpAddressIfExists": {
"type": "object"
},
"Null": {
"type": "object"
},
"NumericEquals": {
"type": "object"
},
"NumericEqualsIfExists": {
"type": "object"
},
"NumericGreaterThan": {
"type": "object"
},
"NumericGreaterThanEquals": {
"type": "object"
},
"NumericGreaterThanEqualsIfExists": {
"type": "object"
},
"NumericGreaterThanIfExists": {
"type": "object"
},
"NumericLessThan": {
"type": "object"
},
"NumericLessThanEquals": {
"type": "object"
},
"NumericLessThanEqualsIfExists": {
"type": "object"
},
"NumericLessThanIfExists": {
"type": "object"
},
"NumericNotEquals": {
"type": "object"
},
"NumericNotEqualsIfExists": {
"type": "object"
},
"StringEquals": {
"type": "object"
},
"StringEqualsIfExists": {
"type": "object"
},
"StringEqualsIgnoreCase": {
"type": "object"
},
"StringEqualsIgnoreCaseIfExists": {
"type": "object"
},
"StringLike": {
"type": "object"
},
"StringLikeIfExists": {
"type": "object"
},
"StringNotEquals": {
"type": "object"
},
"StringNotEqualsIfExists": {
"type": "object"
},
"StringNotEqualsIgnoreCase": {
"type": "object"
},
"StringNotEqualsIgnoreCaseIfExists": {
"type": "object"
},
"StringNotLike": {
"type": "object"
},
"StringNotLikeIfExists": {
"type": "object"
}
},
"additionalProperties": false
}
},
"properties": {
"Version": {
"type": "string",
"enum": [
"2012-10-17"
]
},
"Statement": {
"oneOf": [
{
"type": [
"array"
],
"minItems": 1,
"items": {
"type": "object",
"properties": {
"Sid": {
"type": "string",
"pattern": "^[a-zA-Z0-9]+$"
},
"Action": {
"oneOf": [
{
"$ref": "#/definitions/actionItem"
},
{
"type": "array",
"items": {
"$ref": "#/definitions/actionItem"
}
}
]
},
"Effect": {
"type": "string",
"enum": [
"Allow",
"Deny"
]
},
"Principal": {
"$ref": "#/definitions/principalItem"
},
"Resource": {
"oneOf": [
{
"$ref": "#/definitions/resourceItem"
},
{
"type": "array",
"items": {
"$ref": "#/definitions/resourceItem"
},
"minItems": 1
}
]
},
"Condition": {
"$ref": "#/definitions/conditions"
}
},
"required": [
"Action",
"Effect",
"Principal",
"Resource"
]
}
},
{
"type": [
"object"
],
"properties": {
"Sid": {
"type": "string",
"pattern": "^[a-zA-Z0-9]+$"
},
"Action": {
"oneOf": [
{
"$ref": "#/definitions/actionItem"
},
{
"type": "array",
"items": {
"$ref": "#/definitions/actionItem"
}
}
]
},
"Effect": {
"type": "string",
"enum": [
"Allow",
"Deny"
]
},
"Principal": {
"$ref": "#/definitions/principalItem"
},
"Resource": {
"oneOf": [
{
"$ref": "#/definitions/resourceItem"
},
{
"type": "array",
"items": {
"$ref": "#/definitions/resourceItem"
},
"minItems": 1
}
]
},
"Condition": {
"$ref": "#/definitions/conditions"
}
},
"required": [
"Action",
"Effect",
"Resource",
"Principal"
]
}
]
}
},
"required": [
"Version",
"Statement"
],
"additionalProperties": false
}
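For reference, a minimal policy that would satisfy the removed resource-policy schema above; the bucket name and account ID are placeholders:

const examplePolicy = {
    Version: '2012-10-17',
    Statement: [
        {
            Sid: 'AllowAccountRead',
            Effect: 'Allow',
            Principal: { AWS: '123456789012' },
            Action: ['s3:GetObject', 's3:ListBucket'],
            Resource: [
                'arn:aws:s3:::example-bucket',
                'arn:aws:s3:::example-bucket/*',
            ],
        },
    ],
};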

View File

@ -1,7 +1,7 @@
{ {
"$schema": "http://json-schema.org/draft-04/schema#", "$schema": "http://json-schema.org/draft-04/schema#",
"type": "object", "type": "object",
"title": "AWS IAM Policy schema.", "title": "AWS Policy schema.",
"description": "This schema describes a user policy per AWS policy grammar rules", "description": "This schema describes a user policy per AWS policy grammar rules",
"definitions": { "definitions": {
"principalService": { "principalService": {

View File

@ -32,9 +32,6 @@ const _actionMap = {
bucketPutLifecycle: 's3:PutLifecycleConfiguration', bucketPutLifecycle: 's3:PutLifecycleConfiguration',
bucketGetLifecycle: 's3:GetLifecycleConfiguration', bucketGetLifecycle: 's3:GetLifecycleConfiguration',
bucketDeleteLifecycle: 's3:DeleteLifecycleConfiguration', bucketDeleteLifecycle: 's3:DeleteLifecycleConfiguration',
bucketPutPolicy: 's3:PutBucketPolicy',
bucketGetPolicy: 's3:GetBucketPolicy',
bucketDeletePolicy: 's3:DeleteBucketPolicy',
completeMultipartUpload: 's3:PutObject', completeMultipartUpload: 's3:PutObject',
initiateMultipartUpload: 's3:PutObject', initiateMultipartUpload: 's3:PutObject',
listMultipartUploads: 's3:ListBucketMultipartUploads', listMultipartUploads: 's3:ListBucketMultipartUploads',

View File

@ -68,31 +68,6 @@ function _checkUnmodifiedSince(ifUnmodifiedSinceTime, lastModified) {
return res; return res;
} }
/**
* checks 'if-modified-since' and 'if-unmodified-since' headers if included in
* request against last-modified date of object
* @param {object} headers - headers from request object
* @param {string} lastModified - last modified date of object
* @return {object} contains modifiedSince and unmodifiedSince res objects
*/
function checkDateModifiedHeaders(headers, lastModified) {
let lastModifiedDate = new Date(lastModified);
lastModifiedDate.setMilliseconds(0);
lastModifiedDate = lastModifiedDate.getTime();
const ifModifiedSinceHeader = headers['if-modified-since'] ||
headers['x-amz-copy-source-if-modified-since'];
const ifUnmodifiedSinceHeader = headers['if-unmodified-since'] ||
headers['x-amz-copy-source-if-unmodified-since'];
const modifiedSinceRes = _checkModifiedSince(ifModifiedSinceHeader,
lastModifiedDate);
const unmodifiedSinceRes = _checkUnmodifiedSince(ifUnmodifiedSinceHeader,
lastModifiedDate);
return { modifiedSinceRes, unmodifiedSinceRes };
}
/** /**
* validateConditionalHeaders - validates 'if-modified-since', * validateConditionalHeaders - validates 'if-modified-since',
* 'if-unmodified-since', 'if-match' or 'if-none-match' headers if included in * 'if-unmodified-since', 'if-match' or 'if-none-match' headers if included in
@ -104,14 +79,23 @@ function checkDateModifiedHeaders(headers, lastModified) {
* empty object if no error * empty object if no error
*/ */
function validateConditionalHeaders(headers, lastModified, contentMD5) { function validateConditionalHeaders(headers, lastModified, contentMD5) {
let lastModifiedDate = new Date(lastModified);
lastModifiedDate.setMilliseconds(0);
lastModifiedDate = lastModifiedDate.getTime();
const ifMatchHeader = headers['if-match'] || const ifMatchHeader = headers['if-match'] ||
headers['x-amz-copy-source-if-match']; headers['x-amz-copy-source-if-match'];
const ifNoneMatchHeader = headers['if-none-match'] || const ifNoneMatchHeader = headers['if-none-match'] ||
headers['x-amz-copy-source-if-none-match']; headers['x-amz-copy-source-if-none-match'];
const ifModifiedSinceHeader = headers['if-modified-since'] ||
headers['x-amz-copy-source-if-modified-since'];
const ifUnmodifiedSinceHeader = headers['if-unmodified-since'] ||
headers['x-amz-copy-source-if-unmodified-since'];
const etagMatchRes = _checkEtagMatch(ifMatchHeader, contentMD5); const etagMatchRes = _checkEtagMatch(ifMatchHeader, contentMD5);
const etagNoneMatchRes = _checkEtagNoneMatch(ifNoneMatchHeader, contentMD5); const etagNoneMatchRes = _checkEtagNoneMatch(ifNoneMatchHeader, contentMD5);
const { modifiedSinceRes, unmodifiedSinceRes } = const modifiedSinceRes = _checkModifiedSince(ifModifiedSinceHeader,
checkDateModifiedHeaders(headers, lastModified); lastModifiedDate);
const unmodifiedSinceRes = _checkUnmodifiedSince(ifUnmodifiedSinceHeader,
lastModifiedDate);
// If-Unmodified-Since condition evaluates to false and If-Match // If-Unmodified-Since condition evaluates to false and If-Match
// is not present, then return the error. Otherwise, If-Unmodified-Since is // is not present, then return the error. Otherwise, If-Unmodified-Since is
// silent when If-Match match, and when If-Match does not match, it's the // silent when If-Match match, and when If-Match does not match, it's the
@ -136,6 +120,5 @@ module.exports = {
_checkEtagNoneMatch, _checkEtagNoneMatch,
_checkModifiedSince, _checkModifiedSince,
_checkUnmodifiedSince, _checkUnmodifiedSince,
checkDateModifiedHeaders,
validateConditionalHeaders, validateConditionalHeaders,
}; };
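A small usage sketch for the header checks above; the timestamps and ETag are illustrative, and the return shape follows the JSDoc in this hunk (an empty object when no condition fails):

const lastModified = 'Wed, 01 Jan 2020 00:00:00 GMT'; // object's Last-Modified (illustrative)
const headers = { 'if-modified-since': 'Thu, 02 Jan 2020 00:00:00 GMT' };
// Removed helper: exposes the two date checks on their own:
//   const { modifiedSinceRes, unmodifiedSinceRes } =
//       checkDateModifiedHeaders(headers, lastModified);
// Combined validator: returns {} when every supplied condition passes, otherwise
// an object carrying the relevant precondition error.
const result = validateConditionalHeaders(headers, lastModified,
    'd41d8cd98f00b204e9800998ecf8427e'); // contentMD5 only matters for If-(None-)Match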

View File

@ -10,8 +10,6 @@ const routeOPTIONS = require('./routes/routeOPTIONS');
const routesUtils = require('./routesUtils'); const routesUtils = require('./routesUtils');
const routeWebsite = require('./routes/routeWebsite'); const routeWebsite = require('./routes/routeWebsite');
const { objectKeyByteLimit } = require('../constants');
const routeMap = { const routeMap = {
GET: routeGET, GET: routeGET,
PUT: routePUT, PUT: routePUT,
@ -57,14 +55,8 @@ function checkBucketAndKey(bucketName, objectKey, method, reqQuery,
blacklistedPrefixes.object); blacklistedPrefixes.object);
if (!result.isValid) { if (!result.isValid) {
log.debug('invalid object key', { objectKey }); log.debug('invalid object key', { objectKey });
if (result.invalidPrefix) { return errors.InvalidArgument.customizeDescription('Object key ' +
return errors.InvalidArgument.customizeDescription('Invalid ' + `must not start with "${result.invalidPrefix}".`);
'prefix - object key cannot start with ' +
`"${result.invalidPrefix}".`);
}
return errors.KeyTooLong.customizeDescription('Object key is too ' +
'long. Maximum number of bytes allowed in keys is ' +
`${objectKeyByteLimit}.`);
} }
} }
if ((reqQuery.partNumber || reqQuery.uploadId) if ((reqQuery.partNumber || reqQuery.uploadId)
@ -175,8 +167,7 @@ function routes(req, res, params, logger) {
logger.newRequestLoggerFromSerializedUids(reqUids) : logger.newRequestLoggerFromSerializedUids(reqUids) :
logger.newRequestLogger()); logger.newRequestLogger());
if (!req.url.startsWith('/_/healthcheck') && if (!req.url.startsWith('/_/healthcheck')) {
!req.url.startsWith('/_/report')) {
log.info('received request', clientInfo); log.info('received request', clientInfo);
} }

View File

@ -4,8 +4,6 @@ const errors = require('../errors');
const constants = require('../constants'); const constants = require('../constants');
const { eachSeries } = require('async'); const { eachSeries } = require('async');
const { objectKeyByteLimit } = require('../constants');
const responseErr = new Error(); const responseErr = new Error();
responseErr.code = 'ResponseError'; responseErr.code = 'ResponseError';
responseErr.message = 'response closed by client request before all data sent'; responseErr.message = 'response closed by client request before all data sent';
@ -286,19 +284,13 @@ function retrieveData(locations, retrieveDataFn, response, log) {
response.destroy(); response.destroy();
responseDestroyed = true; responseDestroyed = true;
}; };
const _destroyReadable = readable => {
// s3-data sends Readable stream only which does not implement destroy
if (readable && readable.destroy) {
readable.destroy();
}
};
// the S3-client might close the connection while we are processing it // the S3-client might close the connection while we are processing it
response.once('close', () => { response.once('close', () => {
log.debug('received close event before response end'); log.debug('received close event before response end');
responseDestroyed = true; responseDestroyed = true;
_destroyReadable(currentStream); if (currentStream) {
currentStream.destroy();
}
}); });
return eachSeries(locations, return eachSeries(locations,
@ -319,7 +311,7 @@ function retrieveData(locations, retrieveDataFn, response, log) {
if (responseDestroyed || response.isclosed) { if (responseDestroyed || response.isclosed) {
log.debug( log.debug(
'response destroyed before readable could stream'); 'response destroyed before readable could stream');
_destroyReadable(readable); readable.destroy();
return next(responseErr); return next(responseErr);
} }
// readable stream successfully consumed // readable stream successfully consumed
@ -876,9 +868,6 @@ const routesUtils = {
if (invalidPrefix) { if (invalidPrefix) {
return { isValid: false, invalidPrefix }; return { isValid: false, invalidPrefix };
} }
if (Buffer.byteLength(objectKey, 'utf8') > objectKeyByteLimit) {
return { isValid: false };
}
return { isValid: true }; return { isValid: true };
}, },
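A minimal sketch of the removed key-length guard; the limit below is a hypothetical stand-in for the objectKeyByteLimit constant, whose actual value is not shown in this hunk:

const objectKeyByteLimit = 1024; // hypothetical value, for illustration only
function isKeyTooLong(objectKey) {
    // Keys are measured in UTF-8 bytes, so multi-byte characters count per byte.
    return Buffer.byteLength(objectKey, 'utf8') > objectKeyByteLimit;
}
isKeyTooLong('a'.repeat(1024));  // false with the illustrative limit
isKeyTooLong('é'.repeat(1024));  // true: 'é' encodes to 2 bytes in UTF-8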

View File

@ -10,7 +10,6 @@ const errors = require('../../../errors');
const stringHash = require('../../../stringHash'); const stringHash = require('../../../stringHash');
const jsutil = require('../../../jsutil'); const jsutil = require('../../../jsutil');
const storageUtils = require('../../utils'); const storageUtils = require('../../utils');
const releasePageCacheSync = require('./utils');
// The FOLDER_HASH constant refers to the number of base directories // The FOLDER_HASH constant refers to the number of base directories
// used for directory hashing of stored objects. // used for directory hashing of stored objects.
@ -44,8 +43,6 @@ class DataFileStore {
* sync calls that ensure files and directories are fully * sync calls that ensure files and directories are fully
* written on the physical drive before returning an * written on the physical drive before returning an
* answer. Used to speed up unit tests, may have other uses. * answer. Used to speed up unit tests, may have other uses.
* @param {Boolean} [dataConfig.noCache=false] - If true, attempt
* to free page caches associated with the managed files
* @param {werelogs.API} [logApi] - object providing a constructor function * @param {werelogs.API} [logApi] - object providing a constructor function
* for the Logger object * for the Logger object
*/ */
@ -53,7 +50,6 @@ class DataFileStore {
this.logger = new (logApi || werelogs).Logger('DataFileStore'); this.logger = new (logApi || werelogs).Logger('DataFileStore');
this.dataPath = dataConfig.dataPath; this.dataPath = dataConfig.dataPath;
this.noSync = dataConfig.noSync || false; this.noSync = dataConfig.noSync || false;
this.noCache = dataConfig.noCache || false;
} }
/** /**
@ -153,29 +149,10 @@ class DataFileStore {
return cbOnce(null, key); return cbOnce(null, key);
} }
if (this.noSync) { if (this.noSync) {
/*
* It's not guaranteed that the Kernel will release page
* caches when this.noSync is true. If you want to ensure
* this behavior, set this.noSync to false.
*/
if (this.noCache) {
releasePageCacheSync(filePath, fd, log);
}
fs.closeSync(fd); fs.closeSync(fd);
return ok(); return ok();
} }
fs.fsync(fd, err => { fs.fsync(fd, err => {
/*
* Disabling the caching of stored files is
* a temporary fix for
* https://github.com/kubernetes/kubernetes/issues/43916
* that causes cache memory to be accounted as RSS memory
* for the pod and can potentially cause the pod
* to be killed under memory pressure:
*/
if (this.noCache) {
releasePageCacheSync(filePath, fd, log);
}
fs.close(fd, err => { fs.close(fd, err => {
if (err) { if (err) {
log.error('error closing fd after write', log.error('error closing fd after write',
@ -216,15 +193,6 @@ class DataFileStore {
return cbOnce(errors.InternalError.customizeDescription( return cbOnce(errors.InternalError.customizeDescription(
`read stream error: ${err.code}`)); `read stream error: ${err.code}`));
}); });
dataStream.on('close', () => {
// this means the underlying socket has been closed
log.debug('Client closed socket while streaming',
{ method: 'put', key, filePath });
// destroying the write stream forces a close(fd)
fileStream.destroy();
// we need to unlink the file ourselves
fs.unlinkSync(filePath);
});
return undefined; return undefined;
}); });
} }
@ -273,7 +241,7 @@ class DataFileStore {
flags: 'r', flags: 'r',
encoding: null, encoding: null,
fd: null, fd: null,
autoClose: false, autoClose: true,
}; };
if (byteRange) { if (byteRange) {
readStreamOptions.start = byteRange[0]; readStreamOptions.start = byteRange[0];
@ -288,26 +256,13 @@ class DataFileStore {
return cbOnce(errors.ObjNotFound); return cbOnce(errors.ObjNotFound);
} }
log.error('error retrieving file', log.error('error retrieving file',
{ method: 'DataFileStore.get', key, filePath, { method: 'get', key, filePath,
error: err }); error: err });
return cbOnce( return cbOnce(
errors.InternalError.customizeDescription( errors.InternalError.customizeDescription(
`filesystem read error: ${err.code}`)); `filesystem read error: ${err.code}`));
}) })
.on('open', () => { cbOnce(null, rs); }) .on('open', () => { cbOnce(null, rs); });
.on('end', () => {
if (this.noCache) {
releasePageCacheSync(filePath, rs.fd, log);
}
fs.close(rs.fd, err => {
if (err) {
log.error('unable to close file descriptor', {
method: 'DataFileStore.get', key, filePath,
error: err,
});
}
});
});
} }
/** /**

View File

@ -1,19 +0,0 @@
const posixFadvise = require('fcntl');
/**
* Release free cached pages associated with a file
*
* @param {String} filePath - absolute path of the associated file
* @param {Int} fd - file descriptor of the associated file
* @param {werelogs.RequestLogger} log - logging object
* @return {undefined}
*/
function releasePageCacheSync(filePath, fd, log) {
const ret = posixFadvise(fd, 0, 0, 4);
if (ret !== 0) {
log.warning(
`error fadv_dontneed ${filePath} returned ${ret}`);
}
}
module.exports = releasePageCacheSync;
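A sketch of how the deleted helper was wired around fsync/close in DataFileStore (per the hunks above); the fcntl module and the advice value 4 (POSIX_FADV_DONTNEED) are taken from the deleted file itself:

const fs = require('fs');
const posixFadvise = require('fcntl');

function fsyncAndRelease(filePath, fd, log, noCache) {
    fs.fsyncSync(fd);
    if (noCache) {
        // Drop clean page-cache pages associated with this file descriptor.
        const ret = posixFadvise(fd, 0, 0, 4);
        if (ret !== 0) {
            log.warning(`error fadv_dontneed ${filePath} returned ${ret}`);
        }
    }
    fs.closeSync(fd);
}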

View File

@ -87,7 +87,6 @@ class MetadataWrapper {
database: params.mongodb.database, database: params.mongodb.database,
replicationGroupId: params.replicationGroupId, replicationGroupId: params.replicationGroupId,
path: params.mongodb.path, path: params.mongodb.path,
config: params.config,
logger, logger,
}); });
this.implName = 'mongoclient'; this.implName = 'mongoclient';
@ -110,7 +109,7 @@ class MetadataWrapper {
if (this.client.setup) { if (this.client.setup) {
return this.client.setup(done); return this.client.setup(done);
} }
return process.nextTick(done); return process.nextTick(() => done);
} }
createBucket(bucketName, bucketMD, log, cb) { createBucket(bucketName, bucketMD, log, cb) {

View File

@ -1,421 +0,0 @@
const cluster = require('cluster');
const async = require('async');
const errors = require('../../../errors');
const BucketInfo = require('../../../models/BucketInfo');
const list = require('../../../algos/list/exportAlgos');
const MetadataFileClient = require('./MetadataFileClient');
const versionSep =
require('../../../versioning/constants')
.VersioningConstants.VersionId.Separator;
const METASTORE = '__metastore';
const itemScanRefreshDelay = 1000 * 30 * 60; // 30 minutes
class BucketFileInterface {
/**
* @constructor
* @param {object} [params] - constructor params
* @param {boolean} [params.noDbOpen=false] - true to skip DB open
*                                            (for unit tests only)
* @param {object} logger - logger
*/
constructor(params, logger) {
this.logger = logger;
const { host, port } = params.metadataClient;
this.constants = params.constants;
this.mdClient = new MetadataFileClient({ host, port });
if (params && params.noDbOpen) {
return;
}
this.lastItemScanTime = null;
this.lastItemScanResult = null;
}
setup(done) {
return this.mdClient.openDB((err, value) => {
if (err) {
throw err;
}
// the metastore sublevel is used to store bucket attributes
this.mdDB = value;
this.metastore = this.mdDB.openSub(METASTORE);
if (cluster.isMaster) {
this.setupMetadataServer(done);
}
});
}
setupMetadataServer(done) {
/* Since the bucket creation API is expecting the
usersBucket to have attributes, we pre-create the
usersBucket attributes here */
this.mdClient.logger.debug('setting up metadata server');
const usersBucketAttr = new BucketInfo(this.constants.usersBucket,
'admin', 'admin', new Date().toJSON(),
BucketInfo.currentModelVersion());
return this.metastore.put(
this.constants.usersBucket,
usersBucketAttr.serialize(), {}, err => {
if (err) {
this.logger.fatal('error writing usersBucket ' +
'attributes to metadata',
{ error: err });
throw (errors.InternalError);
}
return done();
});
}
/**
* Load DB if exists
* @param {String} bucketName - name of bucket
* @param {Object} log - logger
* @param {function} cb - callback(err, db, attr)
* @return {undefined}
*/
loadDBIfExists(bucketName, log, cb) {
this.getBucketAttributes(bucketName, log, (err, attr) => {
if (err) {
return cb(err);
}
try {
const db = this.mdDB.openSub(bucketName);
return cb(null, db, attr);
} catch (err) {
return cb(errors.InternalError);
}
});
return undefined;
}
createBucket(bucketName, bucketMD, log, cb) {
this.getBucketAttributes(bucketName, log, err => {
if (err && err !== errors.NoSuchBucket) {
return cb(err);
}
if (err === undefined) {
return cb(errors.BucketAlreadyExists);
}
this.lastItemScanTime = null;
this.putBucketAttributes(bucketName,
bucketMD,
log, cb);
return undefined;
});
}
getBucketAttributes(bucketName, log, cb) {
this.metastore
.withRequestLogger(log)
.get(bucketName, {}, (err, data) => {
if (err) {
if (err.ObjNotFound) {
return cb(errors.NoSuchBucket);
}
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error getting db attributes', logObj);
return cb(errors.InternalError);
}
return cb(null, BucketInfo.deSerialize(data));
});
return undefined;
}
getBucketAndObject(bucketName, objName, params, log, cb) {
this.loadDBIfExists(bucketName, log, (err, db, bucketAttr) => {
if (err) {
return cb(err);
}
db.withRequestLogger(log)
.get(objName, params, (err, objAttr) => {
if (err) {
if (err.ObjNotFound) {
return cb(null, {
bucket: bucketAttr.serialize(),
});
}
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error getting object', logObj);
return cb(errors.InternalError);
}
return cb(null, {
bucket: bucketAttr.serialize(),
obj: objAttr,
});
});
return undefined;
});
return undefined;
}
putBucketAttributes(bucketName, bucketMD, log, cb) {
this.metastore
.withRequestLogger(log)
.put(bucketName, bucketMD.serialize(), {}, err => {
if (err) {
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error putting db attributes', logObj);
return cb(errors.InternalError);
}
return cb();
});
return undefined;
}
deleteBucket(bucketName, log, cb) {
this.metastore
.withRequestLogger(log)
.del(bucketName, {}, err => {
if (err) {
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error deleting bucket',
logObj);
return cb(errors.InternalError);
}
this.lastItemScanTime = null;
return cb();
});
return undefined;
}
putObject(bucketName, objName, objVal, params, log, cb) {
this.loadDBIfExists(bucketName, log, (err, db) => {
if (err) {
return cb(err);
}
db.withRequestLogger(log)
.put(objName, JSON.stringify(objVal), params, (err, data) => {
if (err) {
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error putting object', logObj);
return cb(errors.InternalError);
}
return cb(err, data);
});
return undefined;
});
}
getObject(bucketName, objName, params, log, cb) {
this.loadDBIfExists(bucketName, log, (err, db) => {
if (err) {
return cb(err);
}
db.withRequestLogger(log).get(objName, params, (err, data) => {
if (err) {
if (err.ObjNotFound) {
return cb(errors.NoSuchKey);
}
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error getting object', logObj);
return cb(errors.InternalError);
}
return cb(null, JSON.parse(data));
});
return undefined;
});
}
deleteObject(bucketName, objName, params, log, cb) {
this.loadDBIfExists(bucketName, log, (err, db) => {
if (err) {
return cb(err);
}
db.withRequestLogger(log).del(objName, params, err => {
if (err) {
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error deleting object', logObj);
return cb(errors.InternalError);
}
return cb();
});
return undefined;
});
}
/**
* This complex function deals with different extensions of bucket listing:
* Delimiter based search or MPU based search.
* @param {String} bucketName - The name of the bucket to list
* @param {Object} params - The params to search
* @param {Object} log - The logger object
* @param {function} cb - Callback when done
* @return {undefined}
*/
internalListObject(bucketName, params, log, cb) {
const extName = params.listingType;
const extension = new list[extName](params, log);
const requestParams = extension.genMDParams();
this.loadDBIfExists(bucketName, log, (err, db) => {
if (err) {
return cb(err);
}
let cbDone = false;
db.withRequestLogger(log)
.createReadStream(requestParams, (err, stream) => {
if (err) {
return cb(err);
}
stream
.on('data', e => {
if (extension.filter(e) < 0) {
stream.emit('end');
stream.destroy();
}
})
.on('error', err => {
if (!cbDone) {
cbDone = true;
const logObj = {
rawError: err,
error: err.message,
errorStack: err.stack,
};
log.error('error listing objects', logObj);
cb(errors.InternalError);
}
})
.on('end', () => {
if (!cbDone) {
cbDone = true;
const data = extension.result();
cb(null, data);
}
});
return undefined;
});
return undefined;
});
}
listObject(bucketName, params, log, cb) {
return this.internalListObject(bucketName, params, log, cb);
}
listMultipartUploads(bucketName, params, log, cb) {
return this.internalListObject(bucketName, params, log, cb);
}
getUUID(log, cb) {
return this.mdDB.getUUID(cb);
}
getDiskUsage(cb) {
return this.mdDB.getDiskUsage(cb);
}
countItems(log, cb) {
if (this.lastItemScanTime !== null &&
(Date.now() - this.lastItemScanTime) <= itemScanRefreshDelay) {
return process.nextTick(cb, null, this.lastItemScanResult);
}
const params = {};
const extension = new list.Basic(params, log);
const requestParams = extension.genMDParams();
const res = {
objects: 0,
versions: 0,
buckets: 0,
bucketList: [],
};
let cbDone = false;
this.mdDB.rawListKeys(requestParams, (err, stream) => {
if (err) {
return cb(err);
}
stream
.on('data', e => {
if (!e.includes(METASTORE)) {
if (e.includes(this.constants.usersBucket)) {
res.buckets++;
res.bucketList.push({
name: e.split(this.constants.splitter)[1],
});
} else if (e.includes(versionSep)) {
res.versions++;
} else if (!e.includes('..recordLogs#s3-recordlog')) {
res.objects++;
}
}
})
.on('error', err => {
if (!cbDone) {
cbDone = true;
const logObj = {
error: err,
errorMessage: err.message,
errorStack: err.stack,
};
log.error('error listing objects', logObj);
cb(errors.InternalError);
}
})
.on('end', () => {
if (!cbDone) {
cbDone = true;
async.eachSeries(res.bucketList, (bucket, cb) => {
this.getBucketAttributes(bucket.name, log,
(err, bucketInfo) => {
if (err) {
return cb(err);
}
/* eslint-disable no-param-reassign */
bucket.location =
bucketInfo.getLocationConstraint();
/* eslint-enable no-param-reassign */
return cb();
});
}, err => {
if (!err) {
this.lastItemScanTime = Date.now();
this.lastItemScanResult = res;
}
return cb(err, res);
});
}
return undefined;
});
return undefined;
});
return undefined;
}
}
module.exports = BucketFileInterface;
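A construction and listing sketch for the removed BucketFileInterface, based only on the parameters visible above; the endpoint, constants, and bucket name are illustrative, and logger/log are assumed werelogs-style loggers:

const mdIf = new BucketFileInterface({
    metadataClient: { host: 'localhost', port: 9990 }, // illustrative endpoint
    constants: { usersBucket: 'users..bucket', splitter: '..|..' }, // illustrative values
}, logger);
mdIf.setup(() => {
    mdIf.listObject('example-bucket',
        { listingType: 'Delimiter', maxKeys: 10 }, log, (err, listing) => {
            // `listing` is extension.result() for the chosen listing algorithm
        });
});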

View File

@ -58,7 +58,8 @@ class MetadataFileClient {
logger: this.logger, logger: this.logger,
callTimeoutMs: this.callTimeoutMs, callTimeoutMs: this.callTimeoutMs,
}); });
return dbClient.connect(() => done(null, dbClient)); dbClient.connect(done);
return dbClient;
} }
/** /**

View File

@ -1,32 +0,0 @@
const ListResult = require('./ListResult');
class ListMultipartUploadsResult extends ListResult {
constructor() {
super();
this.Uploads = [];
this.NextKeyMarker = undefined;
this.NextUploadIdMarker = undefined;
}
addUpload(uploadInfo) {
this.Uploads.push({
key: decodeURIComponent(uploadInfo.key),
value: {
UploadId: uploadInfo.uploadId,
Initiator: {
ID: uploadInfo.initiatorID,
DisplayName: uploadInfo.initiatorDisplayName,
},
Owner: {
ID: uploadInfo.ownerID,
DisplayName: uploadInfo.ownerDisplayName,
},
StorageClass: uploadInfo.storageClass,
Initiated: uploadInfo.initiated,
},
});
this.MaxKeys += 1;
}
}
module.exports = ListMultipartUploadsResult;

View File

@ -1,27 +0,0 @@
class ListResult {
constructor() {
this.IsTruncated = false;
this.NextMarker = undefined;
this.CommonPrefixes = [];
/*
Note: this.MaxKeys will get incremented as
keys are added so that when response is returned,
this.MaxKeys will equal total keys in response
(with each CommonPrefix counting as 1 key)
*/
this.MaxKeys = 0;
}
addCommonPrefix(prefix) {
if (!this.hasCommonPrefix(prefix)) {
this.CommonPrefixes.push(prefix);
this.MaxKeys += 1;
}
}
hasCommonPrefix(prefix) {
return (this.CommonPrefixes.indexOf(prefix) !== -1);
}
}
module.exports = ListResult;

View File

@ -1,62 +0,0 @@
# bucket_mem design
## RATIONALE
The bucket API will be used for managing buckets behind the S3 interface.
We plan to have only 2 backends using this interface:
* One production backend
* One debug backend purely in memory
One important remark here is that we don't want an abstraction but a
duck-typing style interface (different classes MemoryBucket and Bucket having
the same methods putObjectMD(), getObjectMD(), etc).
Notes about the memory backend: The backend is currently a simple key/value
store in memory. The functions actually use nextTick() to emulate the future
asynchronous behavior of the production backend.
## BUCKET API
The bucket API is a very simple API with 5 functions:
- putObjectMD(): put metadata for an object in the bucket
- getObjectMD(): get metadata from the bucket
- deleteObjectMD(): delete metadata for an object from the bucket
- deleteBucketMD(): delete a bucket
- getBucketListObjects(): perform the complex bucket listing AWS search
function with various flavors. This function returns a response in a
ListBucketResult object.
getBucketListObjects(prefix, marker, delimiter, maxKeys, callback) behavior is
the following:
prefix (not required): Limits the response to keys that begin with the
specified prefix. You can use prefixes to separate a bucket into different
groupings of keys. (You can think of using prefix to make groups in the same
way you'd use a folder in a file system.)
marker (not required): Specifies the key to start with when listing objects in
a bucket. Amazon S3 returns object keys in alphabetical order, starting with
the key after the marker.
delimiter (not required): A delimiter is a character you use to group keys.
All keys that contain the same string between the prefix, if specified, and the
first occurrence of the delimiter after the prefix are grouped under a single
result element, CommonPrefixes. If you don't specify the prefix parameter, then
the substring starts at the beginning of the key. The keys that are grouped
under CommonPrefixes are not returned elsewhere in the response.
maxKeys: Sets the maximum number of keys returned in the response body. You can
add this to your request if you want to retrieve fewer than the default 1000
keys. The response might contain fewer keys but will never contain more. If
there are additional keys that satisfy the search criteria but were not
returned because maxKeys was exceeded, the response contains an attribute of
IsTruncated set to true and a NextMarker. To return the additional keys, call
the function again using NextMarker as your marker argument in the function.
Any key that does not contain the delimiter will be returned individually in
Contents rather than in CommonPrefixes.
If there is an error, the error subfield is returned in the response.
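A short call sketch for the listing API described above; the method is invoked on a hypothetical bucket instance and the prefix, delimiter, and handlers are illustrative:

// List up to 50 keys under "photos/", grouping deeper levels on "/".
bucket.getBucketListObjects('photos/', null, '/', 50, (err, result) => {
    if (err) {
        return handleError(err); // hypothetical error handler
    }
    // result.Contents: keys under photos/ with no further "/" after the prefix
    // result.CommonPrefixes: groupings such as "photos/2019/"
    // result.IsTruncated / result.NextMarker: pagination, as described above
    return render(result); // hypothetical consumer
});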

View File

@ -1,34 +0,0 @@
function markerFilterMPU(allMarkers, array) {
const { keyMarker, uploadIdMarker } = allMarkers;
// 1. if the item key matches the keyMarker and an uploadIdMarker exists,
// find the first uploadId in the array that is alphabetically after
// uploadIdMarker
// 2. if the item key does not match the keyMarker, find the first uploadId
// in the array that is alphabetically after keyMarker
const firstUnfilteredIndex = array.findIndex(
item => (uploadIdMarker && item.key === keyMarker ?
item.uploadId > uploadIdMarker :
item.key > keyMarker));
return firstUnfilteredIndex !== -1 ? array.slice(firstUnfilteredIndex) : [];
}
function prefixFilter(prefix, array) {
for (let i = 0; i < array.length; i++) {
if (array[i].indexOf(prefix) !== 0) {
array.splice(i, 1);
i--;
}
}
return array;
}
function isKeyInContents(responseObject, key) {
return responseObject.Contents.some(val => val.key === key);
}
module.exports = {
markerFilterMPU,
prefixFilter,
isKeyInContents,
};
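A small illustration of markerFilterMPU from the file above, with made-up upload entries:

const uploads = [
    { key: 'a.txt', uploadId: '111' },
    { key: 'b.txt', uploadId: '222' },
    { key: 'b.txt', uploadId: '333' },
    { key: 'c.txt', uploadId: '444' },
];
// Same key as the marker: skip uploadIds up to and including uploadIdMarker.
markerFilterMPU({ keyMarker: 'b.txt', uploadIdMarker: '222' }, uploads);
// => [{ key: 'b.txt', uploadId: '333' }, { key: 'c.txt', uploadId: '444' }]
// No uploadIdMarker: keep only keys strictly after keyMarker.
markerFilterMPU({ keyMarker: 'b.txt' }, uploads);
// => [{ key: 'c.txt', uploadId: '444' }]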

View File

@ -1,148 +0,0 @@
const errors = require('../../../errors');
const { markerFilterMPU, prefixFilter } = require('./bucket_utilities');
const ListMultipartUploadsResult = require('./ListMultipartUploadsResult');
const { metadata } = require('./metadata');
const defaultMaxKeys = 1000;
function getMultipartUploadListing(bucket, params, callback) {
const { delimiter, keyMarker,
uploadIdMarker, prefix, queryPrefixLength, splitter } = params;
const splitterLen = splitter.length;
const maxKeys = params.maxKeys !== undefined ?
Number.parseInt(params.maxKeys, 10) : defaultMaxKeys;
const response = new ListMultipartUploadsResult();
const keyMap = metadata.keyMaps.get(bucket.getName());
if (prefix) {
response.Prefix = prefix;
if (typeof prefix !== 'string') {
return callback(errors.InvalidArgument);
}
}
if (keyMarker) {
response.KeyMarker = keyMarker;
if (typeof keyMarker !== 'string') {
return callback(errors.InvalidArgument);
}
}
if (uploadIdMarker) {
response.UploadIdMarker = uploadIdMarker;
if (typeof uploadIdMarker !== 'string') {
return callback(errors.InvalidArgument);
}
}
if (delimiter) {
response.Delimiter = delimiter;
if (typeof delimiter !== 'string') {
return callback(errors.InvalidArgument);
}
}
if (maxKeys && typeof maxKeys !== 'number') {
return callback(errors.InvalidArgument);
}
// Sort uploads alphabetically by objectKey and if same objectKey,
// then sort in ascending order by time initiated
let uploads = [];
keyMap.forEach((val, key) => {
uploads.push(key);
});
uploads.sort((a, b) => {
const aIndex = a.indexOf(splitter);
const bIndex = b.indexOf(splitter);
const aObjectKey = a.substring(aIndex + splitterLen);
const bObjectKey = b.substring(bIndex + splitterLen);
const aInitiated = keyMap.get(a).initiated;
const bInitiated = keyMap.get(b).initiated;
if (aObjectKey === bObjectKey) {
if (Date.parse(aInitiated) >= Date.parse(bInitiated)) {
return 1;
}
if (Date.parse(aInitiated) < Date.parse(bInitiated)) {
return -1;
}
}
return (aObjectKey < bObjectKey) ? -1 : 1;
});
// Edit the uploads array so it only
// contains keys that contain the prefix
uploads = prefixFilter(prefix, uploads);
uploads = uploads.map(stringKey => {
const index = stringKey.indexOf(splitter);
const index2 = stringKey.indexOf(splitter, index + splitterLen);
const storedMD = keyMap.get(stringKey);
return {
key: stringKey.substring(index + splitterLen, index2),
uploadId: stringKey.substring(index2 + splitterLen),
bucket: storedMD.eventualStorageBucket,
initiatorID: storedMD.initiator.ID,
initiatorDisplayName: storedMD.initiator.DisplayName,
ownerID: storedMD['owner-id'],
ownerDisplayName: storedMD['owner-display-name'],
storageClass: storedMD['x-amz-storage-class'],
initiated: storedMD.initiated,
};
});
// If keyMarker specified, edit the uploads array so it
// only contains keys that occur alphabetically after the marker.
// If there is also an uploadIdMarker specified, filter to eliminate
// any uploads that share the keyMarker and have an uploadId before
// the uploadIdMarker.
if (keyMarker) {
const allMarkers = {
keyMarker,
uploadIdMarker,
};
uploads = markerFilterMPU(allMarkers, uploads);
}
// Iterate through uploads and filter uploads
// with keys containing delimiter
// into response.CommonPrefixes and filter remaining uploads
// into response.Uploads
for (let i = 0; i < uploads.length; i++) {
const currentUpload = uploads[i];
// If hit maxKeys, stop adding keys to response
if (response.MaxKeys >= maxKeys) {
response.IsTruncated = true;
break;
}
// If a delimiter is specified, find its
// index in the current key AFTER THE OCCURRENCE OF THE PREFIX
// THAT WAS SENT IN THE QUERY (not the prefix including the splitter
// and other elements)
let delimiterIndexAfterPrefix = -1;
const currentKeyWithoutPrefix =
currentUpload.key.slice(queryPrefixLength);
let sliceEnd;
if (delimiter) {
delimiterIndexAfterPrefix = currentKeyWithoutPrefix
.indexOf(delimiter);
sliceEnd = delimiterIndexAfterPrefix + queryPrefixLength;
}
// If delimiter occurs in current key, add key to
// response.CommonPrefixes.
// Otherwise add upload to response.Uploads
if (delimiterIndexAfterPrefix > -1) {
const keySubstring = currentUpload.key.slice(0, sliceEnd + 1);
response.addCommonPrefix(keySubstring);
} else {
response.NextKeyMarker = currentUpload.key;
response.NextUploadIdMarker = currentUpload.uploadId;
response.addUpload(currentUpload);
}
}
// `response.MaxKeys` should be the value from the original `MaxUploads`
// parameter specified by the user (or else the default 1000). Redefine it
// here, so it does not equal the value of `uploads.length`.
response.MaxKeys = maxKeys;
// If `response.MaxKeys` is 0, `response.IsTruncated` should be `false`.
response.IsTruncated = maxKeys === 0 ? false : response.IsTruncated;
return callback(null, response);
}
module.exports = getMultipartUploadListing;

View File

@ -1,8 +0,0 @@
const metadata = {
buckets: new Map,
keyMaps: new Map,
};
module.exports = {
metadata,
};

View File

@ -1,333 +0,0 @@
const errors = require('../../../errors');
const list = require('../../../algos/list/exportAlgos');
const genVID =
require('../../../versioning/VersionID').generateVersionId;
const getMultipartUploadListing = require('./getMultipartUploadListing');
const { metadata } = require('./metadata');
// const genVID = versioning.VersionID.generateVersionId;
const defaultMaxKeys = 1000;
let uidCounter = 0;
function generateVersionId(replicationGroupId) {
return genVID(uidCounter++, replicationGroupId);
}
function formatVersionKey(key, versionId) {
return `${key}\0${versionId}`;
}
function inc(str) {
return str ? (str.slice(0, str.length - 1) +
String.fromCharCode(str.charCodeAt(str.length - 1) + 1)) : str;
}
const metastore = {
createBucket: (bucketName, bucketMD, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, (err, bucket) => {
// TODO Check whether user already owns the bucket,
// if so return "BucketAlreadyOwnedByYou"
// If not owned by user, return "BucketAlreadyExists"
if (bucket) {
return cb(errors.BucketAlreadyExists);
}
metadata.buckets.set(bucketName, bucketMD);
metadata.keyMaps.set(bucketName, new Map);
return cb();
});
});
},
putBucketAttributes: (bucketName, bucketMD, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, err => {
if (err) {
return cb(err);
}
metadata.buckets.set(bucketName, bucketMD);
return cb();
});
});
},
getBucketAttributes: (bucketName, log, cb) => {
process.nextTick(() => {
if (!metadata.buckets.has(bucketName)) {
return cb(errors.NoSuchBucket);
}
return cb(null, metadata.buckets.get(bucketName));
});
},
deleteBucket: (bucketName, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, err => {
if (err) {
return cb(err);
}
if (metadata.keyMaps.has(bucketName)
&& metadata.keyMaps.get(bucketName).length > 0) {
return cb(errors.BucketNotEmpty);
}
metadata.buckets.delete(bucketName);
metadata.keyMaps.delete(bucketName);
return cb(null);
});
});
},
putObject: (bucketName, objName, objVal, params, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, err => {
if (err) {
return cb(err);
}
/*
valid combinations of versioning options:
- !versioning && !versionId: normal non-versioning put
- versioning && !versionId: create a new version
- versionId: update (PUT/DELETE) an existing version,
and also update master version in case the put
version is newer or same version than master.
if versionId === '' update master version
*/
if (params && params.versionId) {
objVal.versionId = params.versionId; // eslint-disable-line
const mst = metadata.keyMaps.get(bucketName).get(objName);
if (mst && mst.versionId === params.versionId || !mst) {
metadata.keyMaps.get(bucketName).set(objName, objVal);
}
// eslint-disable-next-line
objName = formatVersionKey(objName, params.versionId);
metadata.keyMaps.get(bucketName).set(objName, objVal);
return cb(null, `{"versionId":"${objVal.versionId}"}`);
}
if (params && params.versioning) {
const versionId = generateVersionId();
objVal.versionId = versionId; // eslint-disable-line
metadata.keyMaps.get(bucketName).set(objName, objVal);
// eslint-disable-next-line
objName = formatVersionKey(objName, versionId);
metadata.keyMaps.get(bucketName).set(objName, objVal);
return cb(null, `{"versionId":"${versionId}"}`);
}
if (params && params.versionId === '') {
const versionId = generateVersionId();
objVal.versionId = versionId; // eslint-disable-line
metadata.keyMaps.get(bucketName).set(objName, objVal);
return cb(null, `{"versionId":"${objVal.versionId}"}`);
}
metadata.keyMaps.get(bucketName).set(objName, objVal);
return cb(null);
});
});
},
getBucketAndObject: (bucketName, objName, params, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, (err, bucket) => {
if (err) {
return cb(err, { bucket });
}
if (params && params.versionId) {
// eslint-disable-next-line
objName = formatVersionKey(objName, params.versionId);
}
if (!metadata.keyMaps.has(bucketName)
|| !metadata.keyMaps.get(bucketName).has(objName)) {
return cb(null, { bucket: bucket.serialize() });
}
return cb(null, {
bucket: bucket.serialize(),
obj: JSON.stringify(
metadata.keyMaps.get(bucketName).get(objName)
),
});
});
});
},
getObject: (bucketName, objName, params, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, err => {
if (err) {
return cb(err);
}
if (params && params.versionId) {
// eslint-disable-next-line
objName = formatVersionKey(objName, params.versionId);
}
if (!metadata.keyMaps.has(bucketName)
|| !metadata.keyMaps.get(bucketName).has(objName)) {
return cb(errors.NoSuchKey);
}
return cb(null, metadata.keyMaps.get(bucketName).get(objName));
});
});
},
deleteObject: (bucketName, objName, params, log, cb) => {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, err => {
if (err) {
return cb(err);
}
if (!metadata.keyMaps.get(bucketName).has(objName)) {
return cb(errors.NoSuchKey);
}
if (params && params.versionId) {
const baseKey = inc(formatVersionKey(objName, ''));
const vobjName = formatVersionKey(objName,
params.versionId);
metadata.keyMaps.get(bucketName).delete(vobjName);
const mst = metadata.keyMaps.get(bucketName).get(objName);
if (mst.versionId === params.versionId) {
const keys = [];
metadata.keyMaps.get(bucketName).forEach((val, key) => {
if (key < baseKey && key > vobjName) {
keys.push(key);
}
});
if (keys.length === 0) {
metadata.keyMaps.get(bucketName).delete(objName);
return cb();
}
const key = keys.sort()[0];
const value = metadata.keyMaps.get(bucketName).get(key);
metadata.keyMaps.get(bucketName).set(objName, value);
}
return cb();
}
metadata.keyMaps.get(bucketName).delete(objName);
return cb();
});
});
},
_hasDeleteMarker(key, keyMap) {
const objectMD = keyMap.get(key);
if (objectMD['x-amz-delete-marker'] !== undefined) {
return (objectMD['x-amz-delete-marker'] === true);
}
return false;
},
listObject(bucketName, params, log, cb) {
process.nextTick(() => {
const {
prefix,
marker,
delimiter,
maxKeys,
continuationToken,
startAfter,
} = params;
if (prefix && typeof prefix !== 'string') {
return cb(errors.InvalidArgument);
}
if (marker && typeof marker !== 'string') {
return cb(errors.InvalidArgument);
}
if (delimiter && typeof delimiter !== 'string') {
return cb(errors.InvalidArgument);
}
if (maxKeys && typeof maxKeys !== 'number') {
return cb(errors.InvalidArgument);
}
if (continuationToken && typeof continuationToken !== 'string') {
return cb(errors.InvalidArgument);
}
if (startAfter && typeof startAfter !== 'string') {
return cb(errors.InvalidArgument);
}
// A default parameter would only apply when maxKeys is undefined;
// if it is explicitly null, it does not, so fall back to
// defaultMaxKeys here.
let numKeys = maxKeys;
if (numKeys === null) {
numKeys = defaultMaxKeys;
}
if (!metadata.keyMaps.has(bucketName)) {
return cb(errors.NoSuchBucket);
}
// Build the keys array from the range parameters (gt/gte/lt/lte)
// computed by the listing extension, so it only contains keys
// within the requested range (e.g. after the marker)
const listingType = params.listingType;
const extension = new list[listingType](params, log);
const listingParams = extension.genMDParams();
const keys = [];
metadata.keyMaps.get(bucketName).forEach((val, key) => {
if (listingParams.gt && listingParams.gt >= key) {
return null;
}
if (listingParams.gte && listingParams.gte > key) {
return null;
}
if (listingParams.lt && key >= listingParams.lt) {
return null;
}
if (listingParams.lte && key > listingParams.lte) {
return null;
}
return keys.push(key);
});
keys.sort();
// Iterate through keys array and filter keys containing
// delimiter into response.CommonPrefixes and filter remaining
// keys into response.Contents
for (let i = 0; i < keys.length; ++i) {
const currentKey = keys[i];
// Do not list objects that carry a delete marker
if (this._hasDeleteMarker(currentKey,
metadata.keyMaps.get(bucketName))) {
continue;
}
const objMD = metadata.keyMaps.get(bucketName).get(currentKey);
const value = JSON.stringify(objMD);
const obj = {
key: currentKey,
value,
};
// calling extension.filter(obj) adds the obj to the listing
// result if it is not filtered out.
// It returns a negative value once the max keys limit is hit,
// which is when we stop iterating.
// What a nifty function!
if (extension.filter(obj) < 0) {
break;
}
}
return cb(null, extension.result());
});
},
listMultipartUploads(bucketName, listingParams, log, cb) {
process.nextTick(() => {
metastore.getBucketAttributes(bucketName, log, (err, bucket) => {
if (bucket === undefined) {
// no ongoing multipart uploads, return empty listing
return cb(null, {
IsTruncated: false,
NextMarker: undefined,
MaxKeys: 0,
});
}
return getMultipartUploadListing(bucket, listingParams, cb);
});
});
},
};
module.exports = metastore;

View File

@ -1,274 +0,0 @@
const NEW_OBJ = 0;
const NEW_VER = 1;
const UPDATE_VER = 2;
const UPDATE_MST = 3;
const RESTORE = 4;
const DEL_VER = 0;
const DEL_MST = 1;
const CURR = 'curr';
const PREV = 'prev';
function deepCopyObject(obj) {
return JSON.parse(JSON.stringify(obj));
}
class DataCounter {
/**
* DataCounter - class for keeping track of the ItemCount metrics
* @return {DataCounter} DataCounter object
*/
constructor() {
this.objects = 0;
this.versions = 0;
this.dataManaged = {
total: { curr: 0, prev: 0 },
byLocation: {},
};
this.stalled = 0;
this.populated = false;
this.transientList = {};
}
/**
* updateTransientList - update data counter list of transient locations
* @param {Object} newLocations - list of locations constraint details
* @return {undefined}
*/
updateTransientList(newLocations) {
if (newLocations && Object.keys(newLocations).length > 0) {
const tempList = {};
Object.keys(newLocations).forEach(loc => {
tempList[loc] = newLocations[loc].isTransient;
});
this.transientList = tempList;
}
}
/**
* set - set DataCounter values
* @param {Object} setVal - object containing values to be used for setting
* DataCounter
* @param {number} setVal.objects - number of master objects
* @param {number} setVal.versions - number of versioned objects
* @param {Object} setVal.dataManaged - object containing information about
* all the data managed
* @param {Object} setVal.total - object containing the total byte count of
* data managed
* @param {number} setVal.total.curr - the total byte count of master
* objects
* @param {number} setVal.total.prev - the total byte count of versioned
* objects
* @param {Object} setVal.byLocaton - object containing the information
* about data managed on each location
* @return {undefined}
*/
set(setVal) {
if (setVal) {
this.objects = setVal.objects;
this.versions = setVal.versions;
this.dataManaged = deepCopyObject(setVal.dataManaged);
this.populated = true;
this.stalled = setVal.stalled;
}
}
/**
* results - creates a deep copy of the current DataCounter values
* @return {Object} - object containing the current DataCounter values
*/
results() {
const obj = {
objects: this.objects,
versions: this.versions,
dataManaged: this.dataManaged,
stalled: this.stalled,
};
return deepCopyObject(obj);
}
/**
* addObjectFn - performing add operations
* @param {ObjectMD} currMD - new master version metadata
* @param {ObjectMD} prevMD - old master version metadata
* @param {number} type - index of the current type of add operation
* @return {undefined}
*/
addObject(currMD, prevMD, type) {
if (type !== undefined && type !== null && this.populated) {
switch (type) {
case NEW_OBJ: // add new object, replace master if needed
if (prevMD) {
this._delValue(prevMD, CURR);
this._addValue(currMD, CURR);
} else {
++this.objects;
this._addValue(currMD, CURR);
}
break;
case NEW_VER: // add new object, archive master
++this.versions;
this._delValue(prevMD, CURR);
this._addValue(prevMD, PREV);
this._addValue(currMD, CURR);
break;
case UPDATE_VER: // update archived object, replication info
this._updateObject(currMD, prevMD, PREV);
break;
case UPDATE_MST: // update master object, replication info
this._updateObject(currMD, prevMD, CURR);
break;
case RESTORE:
--this.versions;
this._delValue(currMD, PREV);
++this.objects;
this._addValue(currMD, CURR);
break;
default:
// should throw error, noop
break;
}
}
}
/**
* delObjectFn - performing del operations
* @param {ObjectMD} currMD - object metadata
* @param {number} type - index of the current type of delete operation
* @return {undefined}
*/
delObject(currMD, type) {
if (type !== undefined && type !== null && this.populated) {
switch (type) {
case DEL_VER:
--this.versions;
this._delValue(currMD, PREV);
break;
case DEL_MST:
--this.objects;
this._delValue(currMD, CURR);
break;
default:
// should throw error, noop
break;
}
}
}
_addLocation(site, size, type) {
this.dataManaged.total[type] += size;
if (!this.dataManaged.byLocation[site]) {
this.dataManaged.byLocation[site] = {
curr: 0,
prev: 0,
};
}
this.dataManaged.byLocation[site][type] += size;
}
/**
* _addValue - helper function for handling put object updates
* @param {ObjectMD} objMD - object metadata
* @param {string} type - string with value either 'curr' or 'prev'
* @return {undefined}
*/
_addValue(objMD, type) {
if (objMD) {
const { replicationInfo, 'content-length': size } = objMD;
const { backends } = replicationInfo || {};
this._addLocation(objMD.dataStoreName, size, type);
if (backends && Array.isArray(backends)) {
backends.forEach(loc => {
const { site, status } = loc;
if (status === 'COMPLETED') {
this._addLocation(site, size, type);
}
});
}
}
}
/**
* _updateObject - helper function for handling updates from replication
* info changes
* @param {ObjectMD} currMD - new object metadata
* @param {ObjectMD} prevMD - old object metadata
* @param {string} type - string with value either 'curr' or 'prev'
* @return {undefined}
*/
_updateObject(currMD, prevMD, type) {
const transientList = Object.assign({}, this.transientList);
if (currMD && prevMD) {
// check for changes in replication
const { replicationInfo: currLocs,
'content-length': size, dataStoreName } = currMD;
const { replicationInfo: prevLocs } = prevMD;
const { backends: prevBackends } = prevLocs || {};
const { backends: currBackends } = currLocs || {};
const oldLocs = {};
if (prevBackends && Array.isArray(prevBackends)) {
prevBackends.forEach(loc => {
const { site, status } = loc;
oldLocs[site] = status;
});
}
if (currBackends && Array.isArray(currBackends)) {
currBackends.forEach(loc => {
const { site, status } = loc;
if (site in oldLocs && status === 'COMPLETED' &&
oldLocs[site] !== status) {
this._addLocation(site, size, type);
}
});
}
if (currLocs.status === 'COMPLETED' &&
transientList[dataStoreName]) {
this._delLocation(dataStoreName, size, type);
}
}
}
_delLocation(site, size, type) {
if (this.dataManaged.byLocation[site]) {
this.dataManaged.total[type] -= size;
this.dataManaged.total[type] =
Math.max(0, this.dataManaged.total[type]);
this.dataManaged.byLocation[site][type] -= size;
this.dataManaged.byLocation[site][type] =
Math.max(0, this.dataManaged.byLocation[site][type]);
}
}
/**
* _delValue - helper function for handling delete object operations
* @param {ObjectMD} objMD - object metadata
* @param {string} type - string with value either 'curr' or 'prev'
* @return {undefined}
*/
_delValue(objMD, type) {
if (objMD) {
const { replicationInfo, 'content-length': size } = objMD;
const { backends } = replicationInfo || {};
this._delLocation(objMD.dataStoreName, size, type);
if (backends && Array.isArray(backends)) {
backends.forEach(loc => {
const { site, status } = loc;
if (status === 'COMPLETED') {
this._delLocation(site, size, type);
}
});
}
}
}
}
module.exports = {
NEW_OBJ,
NEW_VER,
UPDATE_VER,
UPDATE_MST,
RESTORE,
DEL_VER,
DEL_MST,
DataCounter,
};

View File

@ -1,172 +0,0 @@
const stream = require('stream');
/**
* @class ListRecordStream
* @classdesc Filter and stream records returned from a mongodb query
* cursor
*/
class ListRecordStream extends stream.Readable {
/**
* @constructor
* @param {mongodb.Cursor} mongoCursor - cursor returned by a
* mongodb query to the oplog (see
* http://mongodb.github.io/node-mongodb-native/2.0/api/Cursor.html)
* @param {werelogs.Logger} logger - logger object
* @param {string} lastSavedID - unique ID that has been persisted
* of the most recently processed entry in the oplog
* @param {string} latestOplogID - unique ID of the most recently
* added entry in the oplog
*/
constructor(mongoCursor, logger, lastSavedID, latestOplogID) {
super({ objectMode: true });
this._cursor = mongoCursor;
this._logger = logger;
this._lastSavedID = lastSavedID;
this._latestOplogID = latestOplogID;
this._lastConsumedID = null;
// this._unpublishedListing is true once we pass the oplog
// record that has the same uniqID 'h' as the last saved one.
// If we don't find it (e.g. the log rolled over before the
// populator could process its oldest entries), we will restart
// from the latest record of the oplog.
this._unpublishedListing = false;
// cf. this.getSkipCount()
this._skipCount = 0;
}
_read() {
// MongoDB cursors provide a stream interface. We choose not
// to use it though because errors may not be emitted by the
// stream when there is an issue with the connection to
// MongoDB (especially when pause()/resume() are used).
//
// Instead we use the async cursor.next() call directly to
// fetch records one at a time, errors are then forwarded in
// the callback.
this._cursor.next((err, item) => {
if (err) {
this._logger.error('mongodb cursor error', {
method: 'mongoclient.ListRecordStream._read()',
error: err.message,
});
this.emit('error', err);
return undefined;
}
if (this._processItem(item)) {
return process.nextTick(this._read.bind(this));
}
// wait until _read() gets called again
return undefined;
});
}
_processItem(itemObj) {
// always update to most recent uniqID
this._lastConsumedID = itemObj.h.toString();
// only push to stream unpublished objects
if (!this._lastSavedID) {
// process from the first entry
this._unpublishedListing = true;
} else if (!this._unpublishedListing) {
// Once we find the oplog entry whose unique ID matches the
// stored log offset, every entry AFTER it is unpublished.
if (this._lastSavedID === this._lastConsumedID) {
this._unpublishedListing = true;
} else if (this._latestOplogID === this._lastConsumedID) {
this._logger.warn(
'did not encounter the last saved offset in oplog, ' +
'resuming processing right after the latest record ' +
'to date; some entries may have been skipped', {
lastSavedID: this._lastSavedID,
latestRecordID: this._latestOplogID,
});
this._unpublishedListing = true;
}
++this._skipCount;
return true; // read next record
}
const dbName = itemObj.ns.slice(itemObj.ns.indexOf('.') + 1);
let entry;
if (itemObj.op === 'i' &&
itemObj.o && itemObj.o._id) {
entry = {
type: 'put',
key: itemObj.o._id,
// value is given as-is for inserts
value: JSON.stringify(itemObj.o.value),
};
} else if (itemObj.op === 'u' &&
itemObj.o && itemObj.o2 && itemObj.o2._id) {
entry = {
type: 'put', // updates overwrite the whole metadata,
// so they are considered as puts
key: itemObj.o2._id,
// updated value may be either stored directly in 'o'
// attribute or in '$set' attribute (supposedly when
// the object pre-exists it will be in '$set')
value: JSON.stringify(
(itemObj.o.$set ? itemObj.o.$set : itemObj.o).value),
};
} else if (itemObj.op === 'd' &&
itemObj.o && itemObj.o._id) {
entry = {
type: 'delete',
key: itemObj.o._id,
// deletion yields no value
};
} else {
// skip other entry types as we don't need them for now
// ('c', ...?)
++this._skipCount;
return true; // read next record
}
const streamObject = {
timestamp: new Date((itemObj.ts ?
itemObj.ts.toNumber() * 1000 : 0)),
db: dbName,
entries: [entry],
};
// push object to the stream, then return false to wait until
// _read() is called again (because we are in an asynchronous
// context already)
this.push(streamObject);
return false;
}
/**
* Get an opaque JSON blob containing the latest consumed offset
* from MongoDB oplog.
*
* @return {string} opaque JSON blob
*/
getOffset() {
return JSON.stringify({
uniqID: this._lastConsumedID,
});
}
/**
* Get the number of entries that have been read and skipped from
* MongoDB oplog since the ListRecordStream instance was created.
*
* @return {integer} number of skipped entries
*/
getSkipCount() {
return this._skipCount;
}
/**
* Get whether the stream reached yet-unpublished records
* (i.e. after we reached either the saved unique ID, or the tip
* of the oplog)
*
* @return {boolean} true if we are now returning unpublished records
*/
reachedUnpublishedListing() {
return this._unpublishedListing;
}
}
module.exports = ListRecordStream;

View File

@ -1,121 +0,0 @@
'use strict'; // eslint-disable-line
const MongoClient = require('mongodb').MongoClient;
const ListRecordStream = require('./ListRecordStream');
/**
* @class
* @classdesc Class to consume mongo oplog
*/
class LogConsumer {
/**
* @constructor
*
* @param {object} mongoConfig - object with the mongo configuration
* @param {werelogs.Logger} logger - werelogs logger object
*/
constructor(mongoConfig, logger) {
const { replicaSetHosts, database } = mongoConfig;
this._mongoUrl = `mongodb://${replicaSetHosts}/local`;
this._logger = logger;
this._oplogNsRegExp = new RegExp(`^${database}\\.`);
// oplog collection
this._coll = null;
}
/**
* Connect to MongoClient using Mongo node module to access database and
* database oplogs (operation logs)
*
* @param {function} done - callback function, called with an error object
* or null and an object as 2nd parameter
* @return {undefined}
*/
connectMongo(done) {
MongoClient.connect(this._mongoUrl, {
replicaSet: 'rs0',
useNewUrlParser: true,
},
(err, client) => {
if (err) {
this._logger.error('Unable to connect to MongoDB',
{ error: err });
return done(err);
}
this._logger.info('connected to mongodb');
// 'local' is the database where MongoDB has oplog.rs
// capped collection
const db = client.db('local', {
ignoreUndefined: true,
});
this._coll = db.collection('oplog.rs');
return done();
});
}
/**
* Open a tailable cursor to mongo oplog and retrieve a stream of
* records to read
*
* @param {Object} [params] - params object
* @param {String} [params.startSeq] - fetch starting from this
* opaque offset returned previously by mongo ListRecordStream
* in an 'info' event
* @param {function} cb - callback function, called with an error
* object or null and an object as 2nd parameter
*
* @return {undefined}
*/
readRecords(params, cb) {
let startSeq = {};
if (params.startSeq) {
try {
// parse the opaque JSON string passed through from a
// previous 'info' event
startSeq = JSON.parse(params.startSeq);
} catch (err) {
this._logger.error('malformed startSeq', {
startSeq: params.startSeq,
});
// start over if malformed
}
}
this._readLatestOplogID((err, latestOplogID) => {
if (err) {
return cb(err);
}
return this._coll.find({
ns: this._oplogNsRegExp,
}, {
tailable: true,
awaitData: true,
noCursorTimeout: true,
numberOfRetries: Number.MAX_VALUE,
}, (err, cursor) => {
const recordStream = new ListRecordStream(
cursor, this._logger, startSeq.uniqID, latestOplogID);
return cb(null, { log: recordStream, tailable: true });
});
});
}
_readLatestOplogID(cb) {
this._coll.find({
ns: this._oplogNsRegExp,
}, {
ts: 1,
}).sort({
$natural: -1,
}).limit(1).toArray((err, data) => {
if (err) {
return cb(err);
}
const latestOplogID = data[0].h.toString();
this._logger.debug('latest oplog ID read', { latestOplogID });
return cb(null, latestOplogID);
});
}
}
module.exports = LogConsumer;

File diff suppressed because it is too large

View File

@ -1,173 +0,0 @@
# Mongoclient
We introduce a new metadata backend called *mongoclient* for
[MongoDB](https://www.mongodb.com). This backend takes advantage of
MongoDB being a document store to store the metadata (bucket and
object attributes) as JSON objects.
## Overall Design
The mongoclient backend strictly follows the metadata interface that
stores bucket and object attributes, which consists of the methods
createBucket(), getBucketAttributes(), getBucketAndObject()
(attributes), putBucketAttributes(), deleteBucket(), putObject(),
getObject(), deleteObject(), listObject(), listMultipartUploads() and
the management methods getUUID(), getDiskUsage() and countItems(). The
mongoclient backend also knows how to deal with versioning, and it is
compatible with the various listing algorithms implemented in Arsenal.
FIXME: There should be a document describing the metadata (currently
duck-typing) interface.
### Why Use MongoDB for Storing Bucket and Object Attributes
We chose MongoDB for various reasons:
- MongoDB supports replication, especially through the Raft protocol.
- MongoDB supports a basic replication scheme called 'Replica Set' and
more advanced sharding schemes if required.
- MongoDB is open source and an enterprise standard.
- MongoDB is a document store (natively supports JSON) and supports a
very flexible search interface.
### Choice of Mongo Client Library
We chose to use the official MongoDB driver for NodeJS:
[https://github.com/mongodb/node-mongodb-native](https://github.com/mongodb/node-mongodb-native)
### Granularity for Buckets
We chose a one-collection-per-bucket mapping. First, because in the
simple replication mode called 'replica set' it works from the get-go;
and if one or more buckets grow too big, it is possible to switch to
more advanced schemes such as sharding. MongoDB supports a mix of
sharded and non-sharded collections.
### Storing Database Information
We need a special collection called the *Infostore* (stored under the
name __infostore which is impossible to create through the S3 bucket
naming scheme) to store specific database properties such as the
unique *uuid* for Orbit.
### Storing Bucket Attributes
We need to use a special collection called the *Metastore* (stored
under the name __metastore which is impossible to create through the
S3 bucket naming scheme).
### Versioning Format
We chose to keep the same versioning format that we use in some other
Scality products in order to facilitate the compatibility between the
different products.
FIXME: Document the versioning internals in the upper layers and
document the versioning format
### Dealing with Concurrency
We chose not to use transactions (see
[two-phase commits](https://docs.mongodb.com/manual/tutorial/perform-two-phase-commits/))
because they carry a well-known overhead, and we saw no real need for
them since we can leverage MongoDB's ordered-operation guarantees and
atomic writes.
Example of corner cases:
#### CreateBucket()
Since it is not possible to create a collection AND at the same time
register the bucket in the Metastore we chose to only update the
Metastore. A non-existing collection (NamespaceNotFound error in
Mongo) is one possible normal state for an empty bucket.
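A minimal sketch of that choice, assuming an `__metastore` collection
name and a `{ _id, value }` document shape (both illustrative, not the
backend's actual layout): the per-bucket collection is never created
explicitly and comes into existence on the first object write.

```js
// Illustrative only: bucket creation touches the metastore document,
// never the per-bucket collection (MongoDB creates it lazily on first
// write). Collection and field names here are assumptions.
function createBucket(db, bucketName, bucketAttributes, log, cb) {
    db.collection('__metastore').updateOne(
        { _id: bucketName },
        { $set: { _id: bucketName, value: bucketAttributes } },
        { upsert: true },
        err => cb(err));
}
```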
#### DeleteBucket()
In this case the bucket is *locked* by the upper layers (through a
transient delete flag), and the upper layers also check that the
bucket is empty, so we do not have to worry about either condition.
We first drop() the collection and then asynchronously delete the
bucket name entry from the metastore (the removal from the metastore
is atomic, which is not strictly necessary in this case but more
robust in terms of design).
If we fail in between, we still have an entry in the metastore, which
is good because we need to manage the delete flag. For the upper
layers the operation is not complete until this flag is removed; they
will retry deleteBucket(), which is fine because we handle the case
where the collection does not exist.
#### PutObject() with a Version
We need to store the versioned object, then update the master object
(the latest version). For this we use the
[BulkWrite](http://mongodb.github.io/node-mongodb-native/3.0/api/Collection.html#bulkWrite)
method. This is not a transaction, but it guarantees that the two
operations will happen sequentially in the MongoDB oplog. If the
BulkWrite() fails in between, we may end up creating an orphan (which
is not critical), but if the operation succeeds then we are sure that
the master always points to the right object. If two clients write
concurrently, the two groups of operations will be clearly ordered in
the oplog (the last writer wins).
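As a rough sketch of that ordering, assuming a hypothetical
version-key format (`objName` + null byte + versionId) and a
`{ _id, value }` document shape, neither of which is the backend's
actual layout:

```js
// Sketch only: one ordered bulkWrite carrying the versioned put
// followed by the master update. Key format and shapes are assumptions.
function putVersionedObject(collection, objName, versionId, objVal, cb) {
    const versionKey = `${objName}\u0000${versionId}`; // hypothetical version key
    collection.bulkWrite([
        // 1. store the versioned object first...
        {
            updateOne: {
                filter: { _id: versionKey },
                update: { $set: { value: objVal } },
                upsert: true,
            },
        },
        // 2. ...then point the master key at the same value
        {
            updateOne: {
                filter: { _id: objName },
                update: { $set: { value: objVal } },
                upsert: true,
            },
        },
    ], { ordered: true }, err => cb(err));
}
```

If the call is interrupted after the first operation, the versioned
document is an orphan that a later write or repair can reconcile,
which matches the reasoning above.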
#### DeleteObject()
This is probably the most complex case to manage because it involves a
lot of different cases:
##### Deleting an Object when Versioning is not Enabled
This case is a straightforward atomic delete. Atomicity is not really
required because we assume version IDs are random enough but it is
more robust to do so.
##### Deleting an Object when Versioning is Enabled
This case is more complex since we have to deal with two cases:
Case 1: The caller asks for the deletion of a version which is not the
master: this is a straightforward atomic delete.
Case 2: The caller asks for the deletion of a version which is the
master: in this case we need to set a special flag called PHD (as in
PlaceHolDer) that indicates the master is no longer valid (with a new
unique virtual version ID). We force the ordering of operations in a
bulkWrite() to first replace the master with the PHD flag and then
physically delete the version. If the call fails in between, we are
left with a master carrying the PHD flag. If the call succeeds, we
check whether the PHD master is left alone; if so we delete it,
otherwise we trigger an asynchronous repair that spawns after
ASYNC_REPAIR_TIMEOUT=15s and reassigns the master to the latest
version.
In all cases the physical deletion or the repair of the master is
checked against the PHD flag AND the actual unique virtual version
ID. We do this to guard against potential concurrent deletions,
repairs or updates. Only the last writer/deleter has the right to
physically perform the operation; otherwise it is superseded by other
operations.
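Sketched under the same illustrative key format, the ordering
described above could look like the following; the PHD document shape
and the repair handling are assumptions, not the actual backend code.

```js
// Sketch only: replace the master with a PHD placeholder, then delete
// the version, in one ordered bulkWrite. Names and shapes are assumptions.
function deleteMasterVersion(collection, objName, versionId, cb) {
    const versionKey = `${objName}\u0000${versionId}`; // hypothetical version key
    const phdVersionId = `phd-${Date.now()}`; // hypothetical unique virtual version ID
    collection.bulkWrite([
        // 1. mark the master as a placeholder first...
        {
            updateOne: {
                filter: { _id: objName },
                update: { $set: { value: { isPHD: true, versionId: phdVersionId } } },
                upsert: true,
            },
        },
        // 2. ...then physically delete the targeted version
        { deleteOne: { filter: { _id: versionKey } } },
    ], { ordered: true }, err => {
        if (err) {
            return cb(err);
        }
        // A real implementation would now either remove a lone PHD master
        // or schedule the asynchronous repair mentioned above
        // (ASYNC_REPAIR_TIMEOUT).
        return cb();
    });
}
```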
##### Getting an object with a PHD flag
If the caller asks for the latest version of an object and the
PHD flag is set, we perform a search on the bucket to find the latest
version and return it.
#### Listing Objects
The mongoclient backend implements a readable key/value stream called
*MongoReadStream* that follows the LevelDB duck typing interface used
in Arsenal/lib/algos listing algorithms. Note it does not require any
LevelDB package.
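A small consumer sketch, assuming the stream yields `{ key, value }`
entries and that the listing extension behaves like the ones used by
the memory backend above (`filter()` returning a negative value once
max keys is reached); the require path is illustrative.

```js
// Sketch only: drive an Arsenal listing extension from a key/value
// stream such as MongoReadStream. Require path and filter() semantics
// are assumptions.
const { Delimiter } = require('arsenal').algorithms.list;

function listWithStream(readStream, listingParams, log, cb) {
    const extension = new Delimiter(listingParams, log);
    let done = false;
    const finish = err => {
        if (done) {
            return;
        }
        done = true;
        cb(err, err ? undefined : extension.result());
    };
    readStream.on('data', entry => {
        // entry is a { key, value } pair, value being serialized JSON
        if (extension.filter(entry) < 0) {
            // max keys reached: stop reading and return what we have
            readStream.destroy();
            finish(null);
        }
    });
    readStream.on('error', finish);
    readStream.on('end', () => finish(null));
}
```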
#### Generating the UUID
To avoid race conditions we always (try to) generate a new UUID and
condition the insertion on the non-existence of the document.
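One way to condition the write on non-existence is an upsert with
`$setOnInsert`, sketched below with an assumed `__infostore`
collection handle and `_id`; the actual backend may instead issue a
plain insert and treat a duplicate-key error as success.

```js
// Sketch only: bootstrap the instance UUID exactly once, even with
// concurrent callers. Collection name and document shape are assumptions.
const crypto = require('crypto');

function ensureUUID(infostore, cb) {
    const candidate = crypto.randomBytes(16).toString('hex'); // stand-in for a real UUID
    infostore.updateOne(
        { _id: 'uuid' },
        { $setOnInsert: { value: candidate } },
        { upsert: true },
        err => {
            if (err) {
                return cb(err);
            }
            // Whoever lost the race simply reads back the winning value.
            return infostore.findOne({ _id: 'uuid' },
                (err2, doc) => (err2 ? cb(err2) : cb(null, doc.value)));
        });
}
```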

View File

@ -1,137 +0,0 @@
const Readable = require('stream').Readable;
const MongoUtils = require('./utils');
class MongoReadStream extends Readable {
constructor(c, options, searchOptions) {
super({
objectMode: true,
highWaterMark: 0,
});
if (options.limit === 0) {
return;
}
const query = {
_id: {},
};
if (options.reverse) {
if (options.start) {
query._id.$lte = options.start;
}
if (options.end) {
query._id.$gte = options.end;
}
if (options.gt) {
query._id.$lt = options.gt;
}
if (options.gte) {
query._id.$lte = options.gte;
}
if (options.lt) {
query._id.$gt = options.lt;
}
if (options.lte) {
query._id.$gte = options.lte;
}
} else {
if (options.start) {
query._id.$gte = options.start;
}
if (options.end) {
query._id.$lte = options.end;
}
if (options.gt) {
query._id.$gt = options.gt;
}
if (options.gte) {
query._id.$gte = options.gte;
}
if (options.lt) {
query._id.$lt = options.lt;
}
if (options.lte) {
query._id.$lte = options.lte;
}
}
if (!Object.keys(query._id).length) {
delete query._id;
}
if (searchOptions) {
Object.assign(query, searchOptions);
}
this._cursor = c.find(query).sort({
_id: options.reverse ? -1 : 1,
});
if (options.limit && options.limit !== -1) {
this._cursor = this._cursor.limit(options.limit);
}
this._options = options;
this._destroyed = false;
this.on('end', this._cleanup.bind(this));
}
_read() {
if (this._destroyed) {
return;
}
this._cursor.next((err, doc) => {
if (this._destroyed) {
return;
}
if (err) {
this.emit('error', err);
return;
}
let key = undefined;
let value = undefined;
if (doc) {
key = doc._id;
MongoUtils.unserialize(doc.value);
value = JSON.stringify(doc.value);
}
if (key === undefined && value === undefined) {
this.push(null);
} else if (this._options.keys !== false &&
this._options.values === false) {
this.push(key);
} else if (this._options.keys === false &&
this._options.values !== false) {
this.push(value);
} else {
this.push({
key,
value,
});
}
});
}
_cleanup() {
if (this._destroyed) {
return;
}
this._destroyed = true;
this._cursor.close(err => {
if (err) {
this.emit('error', err);
return;
}
this.emit('close');
});
}
destroy() {
return this._cleanup();
}
}
module.exports = MongoReadStream;

View File

@ -1,30 +0,0 @@
function escape(obj) {
return JSON.parse(JSON.stringify(obj).
replace(/\$/g, '\uFF04').
replace(/\./g, '\uFF0E'));
}
function unescape(obj) {
return JSON.parse(JSON.stringify(obj).
replace(/\uFF04/g, '$').
replace(/\uFF0E/g, '.'));
}
function serialize(objMD) {
// Tags require special handling since dot and dollar are accepted
if (objMD.tags) {
// eslint-disable-next-line
objMD.tags = escape(objMD.tags);
}
}
function unserialize(objMD) {
// Tags require special handling
if (objMD.tags) {
// eslint-disable-next-line
objMD.tags = unescape(objMD.tags);
}
}
module.exports = { escape, unescape, serialize, unserialize };

View File

@ -1,375 +0,0 @@
'use strict'; // eslint-disable-line strict
const errors = require('../../../errors');
const BucketInfo = require('../../../models/BucketInfo');
const { getURIComponents, getRequestBody, sendResponse } = require('./utils');
class BucketdRoutes {
/**
* Create a new Bucketd routes instance
* This class implements the bucketd Metadata protocol and is used in
* the Metadata Proxy Server to implement this protocol on top of
* various metadata backends.
*
* Implementation note: the adaptations performed in the methods of
* the class MetadataWrapper are not required in this context.
* For this reason, the methods of the `client' instance are directly
* called from this class, somewhat defeating the encapsulation of the
* wrapper.
*
* @param {Arsenal.storage.metadata.MetadataWrapper} metadataWrapper - to
* be used as a translation target for the bucketd protocol.
* @param {werelogs.Logger} logger - werelogs logger object
*/
constructor(metadataWrapper, logger) {
this._metadataWrapper = metadataWrapper;
this._logger = logger;
}
// Metadata Wrapper's wrapper
// `attributes' context methods
_getBucketAttributes(req, res, bucketName, logger) {
return this._metadataWrapper.client.getBucketAttributes(
bucketName, logger, (err, data) => {
if (err) {
logger.error('Failed to get bucket attributes',
{ bucket: bucketName, error: err });
return sendResponse(req, res, logger, err);
}
if (data === undefined) {
return sendResponse(req, res, logger,
errors.NoSuchBucket);
}
return sendResponse(req, res, logger, null,
BucketInfo.fromObj(data).serialize());
});
}
_putBucketAttributes(req, res, bucketName, data, logger) {
return this._metadataWrapper.client.putBucketAttributes(
bucketName, BucketInfo.deSerialize(data), logger, err =>
sendResponse(req, res, logger, err));
}
// `bucket' context methods
_createBucket(req, res, bucketName, data, logger) {
return this._metadataWrapper.client.createBucket(
bucketName, BucketInfo.deSerialize(data), logger, err =>
sendResponse(req, res, logger, err));
}
_deleteBucket(req, res, bucketName, logger) {
return this._metadataWrapper.client.deleteBucket(
bucketName, logger, err =>
sendResponse(req, res, logger, err));
}
_putObject(req, res, bucketName, objectName, objectValue, params, logger) {
let parsedValue;
try {
parsedValue = JSON.parse(objectValue);
} catch (err) {
logger.error('Malformed JSON value', { value: objectValue });
return sendResponse(req, res, logger, errors.BadRequest);
}
return this._metadataWrapper.client.putObject(
bucketName, objectName, parsedValue,
params, logger, (err, data) =>
sendResponse(req, res, logger, err, data));
}
_getObject(req, res, bucketName, objectName, params, logger) {
return this._metadataWrapper.client.getObject(
bucketName, objectName, params, logger, (err, data) =>
sendResponse(req, res, logger, err, data));
}
_deleteObject(req, res, bucketName, objectName, params, logger) {
return this._metadataWrapper.client.deleteObject(
bucketName, objectName, params, logger, (err, data) =>
sendResponse(req, res, logger, err, data));
}
_listObject(req, res, bucketName, params, logger) {
const listingParameters = params || {};
if (listingParameters.listingType === undefined) {
listingParameters.listingType = 'Delimiter';
}
if (listingParameters.maxKeys) {
listingParameters.maxKeys = Number.parseInt(params.maxKeys, 10);
}
return this._metadataWrapper.client.listObject(
bucketName, listingParameters, logger, (err, data) =>
sendResponse(req, res, logger, err, data));
}
// `admin' context methods
_checkHealth(req, res, logger) {
return this._metadataWrapper.checkHealth(logger, (err, resp) => {
if (err) {
logger.error('Failed the health check',
{ error: err, method: '_checkHealth' });
return sendResponse(req, res, logger, err);
}
return sendResponse(req, res, logger, undefined, resp);
});
}
_createRequestLogger(req) {
const uids = req.headers['x-scal-request-uids'];
const logger = uids === undefined ?
this._logger.newRequestLogger() :
this._logger.newRequestLoggerFromSerializedUids(uids);
logger.trace('new request', { method: req.method, url: req.url });
return logger;
}
// `parallel' context methods
_getBucketAndObjectMD(req, res, bucketName, objectName, params, logger) {
return this._metadataWrapper.client.getBucketAndObject(
bucketName, objectName, params, logger, (err, data) =>
sendResponse(req, res, logger, err, data));
}
// Internal routes
/**
* Handle routes related to operations on bucket attributes
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_attributesRoutes(req, res, uriComponents, logger) {
if (uriComponents.bucketName === undefined) {
logger.error('Missing bucket name for attributes route',
{ uriComponents });
return sendResponse(req, res, logger, errors.BadRequest);
}
switch (req.method) {
case 'GET':
return this._getBucketAttributes(
req, res,
uriComponents.bucketName, logger, (err, attrs) =>
sendResponse(req, res, logger, err, attrs));
case 'POST':
return getRequestBody(logger, req, (err, body) => {
if (err) {
return sendResponse(req, res, logger, err);
}
return this._putBucketAttributes(
req, res,
uriComponents.bucketName, body, logger, err =>
sendResponse(req, res, logger, err));
});
default:
return sendResponse(req, res, logger, errors.RouteNotFound);
}
}
/**
* Handle routes related to operations on buckets
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {string} uriComponents.namespace - Select the control plane with
* `_' or the data plane with
* `default'.
* @param {string} uriComponents.context - Targets the bucket itself with
* `attributes' or the content of
* the bucket with `bucket'.
* @param {string} uriComponents.bucketName - The name of the bucket
* @param {string} uriComponents.objectName - the key of the object in the
* bucket
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_bucketRoutes(req, res, uriComponents, logger) {
if (uriComponents.bucketName === undefined) {
logger.error('Missing bucket name for bucket route',
{ uriComponents });
return sendResponse(req, res, logger, errors.BadRequest);
}
switch (req.method) {
case 'GET':
return this._listObject(req, res,
uriComponents.bucketName,
uriComponents.options,
logger);
case 'DELETE':
return this._deleteBucket(req, res,
uriComponents.bucketName, logger);
case 'POST':
return getRequestBody(logger, req, (err, body) => {
if (err) {
return sendResponse(req, res, logger, err);
}
return this._createBucket(req, res,
uriComponents.bucketName,
body, logger);
});
default:
return sendResponse(req, res, logger, errors.RouteNotFound);
}
}
/**
* Handle routes related to operations on objects
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_objectRoutes(req, res, uriComponents, logger) {
if (uriComponents.bucketName === undefined) {
logger.error('Missing bucket name for object route',
{ uriComponents });
return sendResponse(req, res, logger, errors.BadRequest);
}
switch (req.method) {
case 'GET':
return this._getObject(req, res,
uriComponents.bucketName,
uriComponents.objectName,
uriComponents.options,
logger);
case 'DELETE':
return this._deleteObject(req, res,
uriComponents.bucketName,
uriComponents.objectName,
uriComponents.options,
logger);
case 'POST':
return getRequestBody(logger, req, (err, body) =>
this._putObject(req, res,
uriComponents.bucketName,
uriComponents.objectName,
body,
uriComponents.options,
logger));
default:
return sendResponse(req, res, logger, errors.RouteNotFound);
}
}
/**
* Handle routes related to operations on both objects and buckets
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_parallelRoutes(req, res, uriComponents, logger) {
if (uriComponents.bucketName === undefined) {
logger.error('Missing bucket name for parallel route',
{ uriComponents });
return sendResponse(req, res, logger, errors.BadRequest);
}
switch (req.method) {
case 'GET':
return this._getBucketAndObjectMD(req, res,
uriComponents.bucketName,
uriComponents.objectName,
uriComponents.options,
logger);
default:
return sendResponse(req, res, logger, errors.RouteNotFound);
}
}
/**
* Handle default routes. e.g. URI starting with /default/
* (or anything except an underscore)
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_defaultRoutes(req, res, uriComponents, logger) {
switch (uriComponents.context) {
case 'leader':
case 'informations':
logger.trace(`${uriComponents.context} operation`);
return sendResponse(req, res, logger, errors.NotImplemented);
case 'metadataInformation':
return sendResponse(req, res, logger, undefined,
'{"metadataVersion":2}');
case 'parallel':
logger.trace(`${uriComponents.context} operation`);
if (uriComponents.objectName) {
return this._parallelRoutes(req, res, uriComponents, logger);
}
return sendResponse(req, res, logger, errors.RouteNotFound);
case 'bucket':
logger.trace(`${uriComponents.context} operation`);
if (uriComponents.objectName) {
return this._objectRoutes(req, res, uriComponents, logger);
}
return this._bucketRoutes(req, res, uriComponents, logger);
case 'attributes':
logger.trace(`${uriComponents.context} operation`);
return this._attributesRoutes(req, res, uriComponents, logger);
default:
logger.error('invalid URI', { uriComponents });
return sendResponse(req, res, logger, errors.RouteNotFound);
}
}
/**
* Handle admin routes. e.g. URI starting with /_/
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {object} uriComponents - URI breakdown of the request to process
* @param {werelogs.Logger} logger - werelogs logger object
* @return {undefined}
*/
_adminRoutes(req, res, uriComponents, logger) {
switch (uriComponents.context) {
case 'healthcheck':
return this._checkHealth(req, res, logger);
default:
return sendResponse(req, res, logger, errors.NotImplemented);
}
}
// The route dispatching method
/**
* dispatch the HTTP request to the appropriate handling function.
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @return {undefined}
*/
dispatch(req, res) {
const adminNamespace = '_';
const logger = this._createRequestLogger(req);
const uriComponents = getURIComponents(req.url, logger);
if (!uriComponents) {
return sendResponse(req, res, logger, errors.BadRequest);
}
switch (uriComponents.namespace) {
case adminNamespace:
return this._adminRoutes(req, res, uriComponents, logger);
default: // coincidentally matches the `default' literal namespace as well
return this._defaultRoutes(req, res, uriComponents, logger);
}
}
}
module.exports = BucketdRoutes;

View File

@ -1,33 +0,0 @@
# Metadata Proxy Server
## Design goals
## Design choices
## Implementation details
## How to run the proxy server
```js
const werelogs = require('werelogs');
const MetadataWrapper = require('arsenal')
.storage.metadata.MetadataWrapper;
const Server = require('arsenal')
.storage.metadata.proxy.Server;
const logger = new werelogs.Logger('MetadataProxyServer',
'debug', 'debug');
const metadataWrapper = new MetadataWrapper('mem', {},
null, logger);
const server = new Server(metadataWrapper,
{
port: 9001,
workers: 1,
},
logger);
server.start(() => {
logger.info('Metadata Proxy Server successfully started. ' +
`Using the ${metadataWrapper.implName} backend`);
});
```
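Once the server is listening, any HTTP client can exercise the bucketd
routes; for instance, probing the admin healthcheck route (a minimal
sketch, assuming the port 9001 configuration shown above):
```js
const http = require('http');

// Probe the admin namespace healthcheck route exposed by BucketdRoutes.
http.get('http://localhost:9001/_/healthcheck', res => {
    let body = '';
    res.on('data', chunk => { body += chunk; });
    res.on('end', () => console.log(res.statusCode, body));
}).on('error', err => console.error('healthcheck request failed', err));
```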

View File

@ -1,105 +0,0 @@
'use strict'; // eslint-disable-line strict
const cluster = require('cluster');
const HttpServer = require('../../../network/http/server');
const BucketdRoutes = require('./BucketdRoutes');
const requiresOneWorker = {
// in memory kvs storage is not shared across processes
memorybucket: true,
};
class Server {
/**
* Create a new Metadata Proxy Server instance
*
* The Metadata Proxy Server is an HTTP server that translates
* requests of the bucketd sub-protocol into function calls to
* a properly configured MetadataWrapper instance. Such instance
* can use any of the available metadata backends available.
*
* @param {arsenal.storage.metadata.MetadataWrapper} metadataWrapper -
* @param {Object} configuration -
* @param {number} configuration.port -
* @param {number} configuration.workers -
* @param {werelogs.Logger} logger -
*/
constructor(metadataWrapper, configuration, logger) {
this._configuration = configuration;
if (requiresOneWorker[metadataWrapper.implName] &&
this._configuration.workers !== 1) {
logger.warn('This metadata backend requires only one worker',
{ metadataBackend: metadataWrapper.implName });
this._configuration.workers = 1;
}
this._logger = logger;
this._metadataWrapper = metadataWrapper;
this._proxyRoutes = new BucketdRoutes(metadataWrapper, this._logger);
this._httpServer = null;
this._installSignalHandlers();
}
_cleanup() {
if (cluster.isWorker) {
this._logger.info('Server worker shutting down...');
this._httpServer.stop();
} else {
this._logger.info('Server shutting down...');
}
return process.exit(0);
}
_installSignalHandlers() {
process.on('SIGINT', () => { this._cleanup(); });
process.on('SIGHUP', () => { this._cleanup(); });
process.on('SIGQUIT', () => { this._cleanup(); });
process.on('SIGTERM', () => { this._cleanup(); });
process.on('SIGPIPE', () => {});
}
/**
* Start the Metadata Proxy Server instance
*
* @param {Function} cb - called with no argument when the onListening event
* is triggered
* @return {undefined}
*/
start(cb) {
if (cluster.isMaster) {
for (let i = 0; i < this._configuration.workers; ++i) {
cluster.fork();
}
cluster.on('disconnect', worker => {
this._logger
.info(`worker ${worker.process.pid} exited, respawning.`);
cluster.fork();
});
} else {
this._httpServer = new HttpServer(this._configuration.port,
this._logger);
if (this._configuration.bindAddress) {
this._httpServer.setBindAddress(
this._configuration.bindAddress);
}
this._httpServer
.onRequest((req, res) => this._proxyRoutes.dispatch(req, res))
.onListening(() => {
this._logger.info(
'Metadata Proxy Server now listening on' +
` port ${this._configuration.port}`);
if (cb) {
return this._metadataWrapper.setup(cb);
}
return this._metadataWrapper.setup(() => {
this._logger.info('MetadataWrapper setup complete.');
});
})
.start();
}
}
}
module.exports = Server;

View File

@ -1,183 +0,0 @@
const url = require('url');
const querystring = require('querystring');
const errors = require('../../../errors');
/**
* Extracts components from URI.
* @param {string} uri - uri part of the received request
* @param {werelogs.Logger} logger -
* @return {object} ret - URI breakdown of the request to process
* @return {string} ret.namespace - targeted plane, control plane is targeted
* with `_' and the data plane with `default'.
* @return {string} ret.context - Targets the bucket itself with
* `attributes' or the content of
* the bucket with `bucket'.
* @return {string} ret.bucketName - The name of the bucket
* @return {string} ret.objectName - the key of the object in the bucket
*/
function getURIComponents(uri, logger) {
try {
if (uri.charAt(0) !== '/') {
return {};
}
const { pathname, query } = url.parse(uri);
const options = query ? querystring.parse(query) : {};
const typeIndex = pathname.indexOf('/', 1);
const bucketIndex = pathname.indexOf('/', typeIndex + 1);
const objectIndex = pathname.indexOf('/', bucketIndex + 1);
if (typeIndex === -1 || typeIndex === pathname.length - 1) {
return {};
}
if (bucketIndex === -1) {
return {
namespace: pathname.substring(1, typeIndex),
context: pathname.substring(typeIndex + 1),
};
}
if (bucketIndex === pathname.length - 1) {
return {
namespace: pathname.substring(1, typeIndex),
context: pathname.substring(typeIndex + 1, bucketIndex),
};
}
if (objectIndex === -1) {
return {
namespace: pathname.substring(1, typeIndex),
context: pathname.substring(typeIndex + 1, bucketIndex),
bucketName: pathname.substring(bucketIndex + 1),
options,
};
}
if (objectIndex === pathname.length - 1) {
return {
namespace: pathname.substring(1, typeIndex),
context: pathname.substring(typeIndex + 1, bucketIndex),
bucketName: pathname.substring(bucketIndex + 1, objectIndex),
options,
};
}
return {
namespace: pathname.substring(1, typeIndex),
context: pathname.substring(typeIndex + 1, bucketIndex),
bucketName: pathname.substring(bucketIndex + 1, objectIndex),
objectName: decodeURIComponent(pathname.substring(objectIndex + 1)),
options,
};
} catch (ex) {
logger.error('Invalid URI: failed to parse',
{ uri, error: ex, errorStack: ex.stack,
message: ex.message });
return null;
}
}
/**
* Extracts the body of the request through a callback
* @param {werelogs.Logger} logger - instance of the logger that will emit the
* log entry
* @param {http.IncomingMessage} request - request received from bucketclient
* @param {Function} cb - function which has an interest in the request body.
* The first parameter is err and may be falsey
* The second parameter is the body of the request
* @return {undefined}
*/
function getRequestBody(logger, request, cb) {
const body = [];
let bodyLen = 0;
request.on('data', data => {
body.push(data);
bodyLen += data.length;
}).on('error', cb).on('end', () => {
cb(null, Buffer.concat(body, bodyLen).toString());
}).on('close', () => {
logger.error('Connection closed by remote peer');
/* Execution will eventually reach the sendResponse code which will
* trigger the proper cleanup as the remote peer already hung up and
* nobody is on the line to get the message */
cb(errors.BadRequest);
});
}
/**
* Emit a log entry corresponding to the end of the request
*
* @param {werelogs.Logger} logger - instance of the logger that will emit the
* log entry
* @param {http.IncomingMessage} req - request being processed
* @param {object} statusCode - HTTP status code sent back to the client
* @param {object} statusMessage - HTTP status message sent back to the client
* @return {undefined}
*/
function _logRequestEnd(logger, req, statusCode, statusMessage) {
const info = {
clientIp: req.socket.remoteAddress,
clientPort: req.socket.remotePort,
httpMethod: req.method,
httpURL: req.url,
httpCode: statusCode,
httpMessage: statusMessage,
};
logger.end('finished handling request', info);
}
/**
* Request processing exit point, sends back to the client the specified data
* and/or error code
*
* @param {http.IncomingMessage} req - request being processed
* @param {http.OutgoingMessage} res - response associated to the request
* @param {werelogs.Logger} log - instance of the logger to use
* @param {Arsenal.Error} err - if not null, defines the HTTP status
* code and message
* @param {string} data - if not null, used as the response body. If `data'
* isn't a string, it's considered as a JSON object and
* it's content get serialized before being sent.
* @return {undefined}
*/
function sendResponse(req, res, log, err, data) {
let statusCode;
let statusMessage;
if (err) {
statusCode = err.code;
statusMessage = err.message;
} else {
statusCode = errors.ok.code;
statusMessage = errors.ok.message;
}
if (data) {
let resData = data;
if (typeof resData === 'object') {
resData = JSON.stringify(data);
} else if (typeof resData === 'number') {
resData = resData.toString();
}
/*
* Encoding data to binary provides a hot path to write data
* directly to the socket, without node.js trying to encode the data
* over and over again.
*/
const rawData = Buffer.from(resData, 'utf8');
/*
* Using Buffer.byteLength is not required here because the data is
* binary encoded; data.length gives us the exact byte length
*/
res.writeHead(statusCode, statusMessage, {
'content-length': rawData.length,
});
res.write(rawData);
} else {
res.writeHead(statusCode, statusMessage, { 'content-length': 0 });
}
return res.end(() => {
_logRequestEnd(log, req, statusCode, statusMessage);
});
}
module.exports = {
getURIComponents,
getRequestBody,
sendResponse,
};

2432
package-lock.json generated

File diff suppressed because it is too large

View File

@ -1,16 +1,16 @@
{ {
"name": "arsenal", "name": "arsenal",
"engines": { "engines": {
"node": ">=8" "node": ">=6.9.5"
}, },
"version": "8.0.6", "version": "7.4.3",
"description": "Common utilities for the S3 project components", "description": "Common utilities for the S3 project components",
"main": "index.js", "main": "index.js",
"repository": { "repository": {
"type": "git", "type": "git",
"url": "git+https://github.com/scality/Arsenal.git" "url": "git+https://github.com/scality/Arsenal.git"
}, },
"author": "Scality Inc.", "author": "Giorgio Regni",
"license": "Apache-2.0", "license": "Apache-2.0",
"bugs": { "bugs": {
"url": "https://github.com/scality/Arsenal/issues" "url": "https://github.com/scality/Arsenal/issues"
@ -21,15 +21,12 @@
"JSONStream": "^1.0.0", "JSONStream": "^1.0.0",
"ajv": "4.10.0", "ajv": "4.10.0",
"async": "~2.1.5", "async": "~2.1.5",
"bson": "2.0.4",
"debug": "~2.3.3", "debug": "~2.3.3",
"diskusage": "^1.1.1", "diskusage": "^1.1.1",
"fcntl": "github:scality/node-fcntl",
"ioredis": "4.9.5", "ioredis": "4.9.5",
"ipaddr.js": "1.2.0", "ipaddr.js": "1.2.0",
"level": "~5.0.1", "level": "~5.0.1",
"level-sublevel": "~6.6.5", "level-sublevel": "~6.6.5",
"mongodb": "^3.0.1",
"node-forge": "^0.7.1", "node-forge": "^0.7.1",
"simple-glob": "^0.1", "simple-glob": "^0.1",
"socket.io": "~1.7.3", "socket.io": "~1.7.3",

View File

@ -1,105 +0,0 @@
'use strict'; // eslint-disable-line strict
const assert = require('assert');
const RedisClient = require('../../../lib/metrics/RedisClient');
const { backbeat } = require('../../../');
// expirations
const EXPIRY = 86400; // 24 hours
const THROUGHPUT_EXPIRY = 900; // 15 minutes
// setup redis client
const config = {
host: '127.0.0.1',
port: 6379,
enableOfflineQueue: false,
};
const fakeLogger = {
trace: () => {},
error: () => {},
};
const redisClient = new RedisClient(config, fakeLogger);
// setup stats model
const sites = ['site1', 'site2'];
const metrics = new backbeat.Metrics({
redisConfig: config,
validSites: ['site1', 'site2', 'all'],
internalStart: Date.now() - (EXPIRY * 1000), // 24 hours ago.
}, fakeLogger);
// Since many methods were overwritten, these tests should validate the changes
// made to the original methods
describe('Metrics class', () => {
afterEach(() => redisClient.clear(() => {}));
it('should not crash on empty results', done => {
const redisKeys = {
ops: 'bb:crr:ops',
bytes: 'bb:crr:bytes',
opsDone: 'bb:crr:opsdone',
bytesDone: 'bb:crr:bytesdone',
bytesFail: 'bb:crr:bytesfail',
opsFail: 'bb:crr:opsfail',
failedCRR: 'bb:crr:failed',
opsPending: 'bb:crr:bytespending',
bytesPending: 'bb:crr:opspending',
};
const routes = backbeat.routes(redisKeys, sites);
const details = routes.find(route =>
route.category === 'metrics' && route.type === 'all');
details.site = 'all';
metrics.getAllMetrics(details, (err, res) => {
assert.ifError(err);
const expected = {
pending: {
description: 'Number of pending replication operations ' +
'(count) and bytes (size)',
results: {
count: 0,
size: 0,
},
},
backlog: {
description: 'Number of incomplete replication operations' +
' (count) and number of incomplete bytes transferred' +
' (size)',
results: {
count: 0,
size: 0,
},
},
completions: {
description: 'Number of completed replication operations' +
' (count) and number of bytes transferred (size) in ' +
`the last ${EXPIRY} seconds`,
results: {
count: 0,
size: 0,
},
},
failures: {
description: 'Number of failed replication operations ' +
`(count) and bytes (size) in the last ${EXPIRY} ` +
'seconds',
results: {
count: 0,
size: 0,
},
},
throughput: {
description: 'Current throughput for replication' +
' operations in ops/sec (count) and bytes/sec (size) ' +
`in the last ${THROUGHPUT_EXPIRY} seconds`,
results: {
count: '0.00',
size: '0.00',
},
},
};
assert.deepStrictEqual(res, expected);
done();
});
});
});

View File

@ -1,319 +0,0 @@
'use strict'; // eslint-disable-line strict
const werelogs = require('werelogs');
const assert = require('assert');
const async = require('async');
const logger = new werelogs.Logger('MetadataProxyServer', 'debug', 'debug');
const MetadataWrapper =
require('../../../lib/storage/metadata/MetadataWrapper');
const BucketRoutes =
require('../../../lib/storage/metadata/proxy/BucketdRoutes');
const metadataWrapper = new MetadataWrapper('mem', {}, null, logger);
const { RequestDispatcher } = require('../../utils/mdProxyUtils');
const routes = new BucketRoutes(metadataWrapper, logger);
const dispatcher = new RequestDispatcher(routes);
const Bucket = 'test';
const bucketInfo = {
acl: {
Canned: 'private',
FULL_CONTROL: [],
WRITE: [],
WRITE_ACP: [],
READ: [],
READ_ACP: [],
},
name: Bucket,
owner: '9d8fe19a78974c56dceb2ea4a8f01ed0f5fecb9d29f80e9e3b84104e4a3ea520',
ownerDisplayName: 'anonymousCoward',
creationDate: '2018-06-04T17:45:42.592Z',
mdBucketModelVersion: 8,
transient: false,
deleted: false,
serverSideEncryption: null,
versioningConfiguration: null,
locationConstraint: 'us-east-1',
readLocationConstraint: 'us-east-1',
cors: null,
replicationConfiguration: null,
lifecycleConfiguration: null,
uid: 'fea97818-6a9a-11e8-9777-e311618cc5d4',
isNFS: null,
};
const objects = [
'aaa',
'bbb/xaa',
'bbb/xbb',
'bbb/xcc',
'ccc',
'ddd',
];
function _getExpectedListing(prefix, objects) {
const filtered = objects.map(key => {
const deprefixed = key.slice(prefix.length);
return deprefixed.replace(/[/].*/, '/');
});
const keySet = {};
return filtered.filter(key => {
if (keySet[key]) {
return false;
}
if (key === '') {
return false;
}
keySet[key] = true;
return true;
});
}
function _listingURL(prefix, marker) {
const reSlash = /[/]/g;
const escapedPrefix = prefix.replace(reSlash, '%2F');
const escapedMarker = marker.replace(reSlash, '%2F');
return `/default/bucket/${Bucket}?delimiter=%2F&prefix=` +
`${escapedPrefix}&maxKeys=1&marker=${escapedMarker}`;
}
function _listObjects(prefix, objects, cb) {
const keys = _getExpectedListing(prefix, objects);
const markers = keys.slice(0);
markers.unshift(undefined);
const lastKey = keys[keys.length - 1];
const listing = keys.map((key, index) => ({
key,
marker: markers[index],
NextMarker: markers[index + 1],
IsTruncated: key !== lastKey,
isPrefix: key.endsWith('/'),
}));
async.mapLimit(listing, 5, (obj, next) => {
const currentMarker = obj.marker === undefined ? '' : obj.marker;
dispatcher.get(_listingURL(prefix, prefix + currentMarker),
(err, response, body) => {
if (err) {
return next(err);
}
if (obj.isPrefix) {
assert.strictEqual(body.Contents.length, 0);
assert.strictEqual(body.CommonPrefixes.length,
1);
assert.strictEqual(body.CommonPrefixes[0],
prefix + obj.key);
} else {
assert.strictEqual(body.Contents.length, 1);
assert.strictEqual(body.CommonPrefixes.length,
0);
assert.strictEqual(body.Contents[0].key,
prefix + obj.key);
}
assert.strictEqual(body.IsTruncated,
obj.IsTruncated);
if (body.IsTruncated) {
assert.strictEqual(body.NextMarker,
prefix + obj.NextMarker);
}
return next();
});
}, err => cb(err));
}
function _createObjects(objects, cb) {
async.mapLimit(objects, 5, (key, next) => {
dispatcher.post(`/default/bucket/${Bucket}/${key}`,
{ key }, next);
}, err => {
cb(err);
});
}
function _readObjects(objects, cb) {
async.mapLimit(objects, 5, (key, next) => {
dispatcher.get(`/default/bucket/${Bucket}/${key}`,
(err, response, body) => {
assert.deepStrictEqual(body.key, key);
next(err);
});
}, err => {
cb(err);
});
}
function _deleteObjects(objects, cb) {
async.mapLimit(objects, 5, (key, next) => {
dispatcher.delete(`/default/bucket/${Bucket}/${key}`,
err => next(err));
}, err => {
cb(err);
});
}
describe('Basic Metadata Proxy Server test',
function bindToThis() {
this.timeout(10000);
it('Should get the metadataInformation', done => {
dispatcher.get('/default/metadataInformation',
(err, response, body) => {
if (err) {
return done(err);
}
assert.deepStrictEqual(
body, { metadataVersion: 2 });
return done();
});
});
});
describe('Basic Metadata Proxy Server CRUD test', function bindToThis() {
this.timeout(10000);
beforeEach(done => {
dispatcher.post(`/default/bucket/${Bucket}`, bucketInfo,
done);
});
afterEach(done => {
dispatcher.delete(`/default/bucket/${Bucket}`, done);
});
it('Should get the bucket attributes', done => {
dispatcher.get(`/default/attributes/${Bucket}`,
(err, response, body) => {
if (err) {
return done(err);
}
assert.deepStrictEqual(body.name,
bucketInfo.name);
return done();
});
});
it('Should crud an object', done => {
async.waterfall([
next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
{ foo: 'gabu' }, err => next(err)),
next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
(err, response, body) => {
if (!err) {
assert.deepStrictEqual(body.foo,
'gabu');
next(err);
}
}),
next => dispatcher.post(`/default/bucket/${Bucket}/test1`,
{ foo: 'zome' }, err => next(err)),
next => dispatcher.get(`/default/bucket/${Bucket}/test1`,
(err, response, body) => {
if (!err) {
assert.deepStrictEqual(body.foo,
'zome');
next(err);
}
}),
next => dispatcher.delete(`/default/bucket/${Bucket}/test1`,
err => next(err)),
], err => done(err));
});
it('Should list objects', done => {
async.waterfall([
next => _createObjects(objects, next),
next => _readObjects(objects, next),
next => _listObjects('', objects, next),
next => _listObjects('bbb/', objects, next),
next => _deleteObjects(objects, next),
], err => {
done(err);
});
});
it('Should update bucket properties', done => {
dispatcher.get(
`/default/attributes/${Bucket}`, (err, response, body) => {
assert.strictEqual(err, null);
const bucketInfo = body;
const newOwnerDisplayName = 'divertedfrom';
bucketInfo.ownerDisplayName = newOwnerDisplayName;
dispatcher.post(
`/default/attributes/${Bucket}`, bucketInfo, err => {
assert.strictEqual(err, null);
dispatcher.get(
`/default/attributes/${Bucket}`,
(err, response, body) => {
assert.strictEqual(err, null);
const newBucketInfo = body;
assert.strictEqual(
newBucketInfo.ownerDisplayName,
newOwnerDisplayName);
done(null);
});
});
});
});
it('Should fail to list a non-existing bucket', done => {
dispatcher.get('/default/bucket/nonexisting',
(err, response) => {
assert.strictEqual(
response.responseHead.statusCode,
404);
done(err);
});
});
it('Should fail to get attributes from a non-existing bucket', done => {
dispatcher.get('/default/attributes/nonexisting',
(err, response) => {
assert.strictEqual(
response.responseHead.statusCode,
404);
done(err);
});
});
it('should succeed a health check', done => {
dispatcher.get('/_/healthcheck', (err, response, body) => {
if (err) {
return done(err);
}
const expectedResponse = {
memorybucket: {
code: 200,
message: 'OK',
},
};
assert.strictEqual(response.responseHead.statusCode, 200);
assert.deepStrictEqual(body, expectedResponse);
return done(err);
});
});
it('should work with parallel route', done => {
const objectName = 'theObj';
async.waterfall([
next => _createObjects([objectName], next),
next => {
dispatcher.get(
`/default/parallel/${Bucket}/${objectName}`,
(err, response, body) => {
if (err) {
return next(err);
}
assert.strictEqual(response.responseHead.statusCode,
200);
const bucketMD = JSON.parse(body.bucket);
const objectMD = JSON.parse(body.obj);
const expectedObjectMD = { key: objectName };
assert.deepStrictEqual(bucketMD.name,
bucketInfo.name);
assert.deepStrictEqual(objectMD, expectedObjectMD);
return next(err);
});
},
next => _deleteObjects([objectName], next),
], done);
});
});

View File

@ -1,318 +0,0 @@
'use strict'; // eslint-disable-line strict
const assert = require('assert');
const async = require('async');
const RedisClient = require('../../../lib/metrics/RedisClient');
const StatsModel = require('../../../lib/metrics/StatsModel');
// setup redis client
const config = {
host: '127.0.0.1',
port: 6379,
enableOfflineQueue: false,
};
const fakeLogger = {
trace: () => {},
error: () => {},
};
const redisClient = new RedisClient(config, fakeLogger);
// setup stats model
const STATS_INTERVAL = 300; // 5 minutes
const STATS_EXPIRY = 86400; // 24 hours
const statsModel = new StatsModel(redisClient, STATS_INTERVAL, STATS_EXPIRY);
function setExpectedStats(expected) {
return expected.concat(
Array((STATS_EXPIRY / STATS_INTERVAL) - expected.length).fill(0));
}
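// For reference, the padding above works out as: 86400 (STATS_EXPIRY) / 300 (STATS_INTERVAL)
// = 288 five-minute buckets, so e.g. setExpectedStats([9]) is [9] followed by 287 zeros
// (total length 288).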
// Since many methods were overwritten, these tests should validate the changes
// made to the original methods
describe('StatsModel class', () => {
const id = 'arsenal-test';
const id2 = 'test-2';
const id3 = 'test-3';
afterEach(() => redisClient.clear(() => {}));
it('should convert a 2d array columns into rows and vice versa using _zip',
() => {
const arrays = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9],
];
const res = statsModel._zip(arrays);
const expected = [
[1, 4, 7],
[2, 5, 8],
[3, 6, 9],
];
assert.deepStrictEqual(res, expected);
});
it('_zip should return an empty array if given an invalid array', () => {
const arrays = [];
const res = statsModel._zip(arrays);
assert.deepStrictEqual(res, []);
});
it('_getCount should return an array of all valid integer values',
() => {
const res = statsModel._getCount([
[null, '1'],
[null, '2'],
[null, null],
]);
assert.deepStrictEqual(res, setExpectedStats([1, 2, 0]));
});
it('should correctly record a new request with the default increment of one',
done => {
async.series([
next => {
statsModel.reportNewRequest(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 1], [null, 1]];
assert.deepStrictEqual(res, expected);
next();
});
},
next => {
statsModel.reportNewRequest(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 2], [null, 1]];
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should record new requests by defined amount increments', done => {
function noop() {}
async.series([
next => {
statsModel.reportNewRequest(id, 9);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests, setExpectedStats([9]));
next();
});
},
next => {
statsModel.reportNewRequest(id);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests,
setExpectedStats([10]));
next();
});
},
next => {
statsModel.reportNewRequest(id, noop);
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests,
setExpectedStats([11]));
next();
});
},
], done);
});
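// Reading of the three steps above (an interpretation of the expected totals, not taken from
// the StatsModel documentation):
//     reportNewRequest(id, 9)    -> total 9   (explicit increment)
//     reportNewRequest(id)       -> total 10  (default increment of 1)
//     reportNewRequest(id, noop) -> total 11  (non-numeric argument treated as a callback,
//                                              so the default increment of 1 applies again)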
it('should correctly record a 500 on the server', done => {
statsModel.report500(id, (err, res) => {
assert.ifError(err);
const expected = [[null, 1], [null, 1]];
assert.deepStrictEqual(res, expected);
done();
});
});
it('should respond back with total requests as an array', done => {
async.series([
next => {
statsModel.reportNewRequest(id, err => {
assert.ifError(err);
next();
});
},
next => {
statsModel.report500(id, err => {
assert.ifError(err);
next();
});
},
next => {
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([1]),
'500s': setExpectedStats([1]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should not crash on empty results', done => {
async.series([
next => {
statsModel.getStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([]),
'500s': setExpectedStats([]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
next => {
statsModel.getAllStats(fakeLogger, id, (err, res) => {
assert.ifError(err);
const expected = {
'requests': setExpectedStats([]),
'500s': setExpectedStats([]),
'sampleDuration': STATS_EXPIRY,
};
assert.deepStrictEqual(res, expected);
next();
});
},
], done);
});
it('should return a zero-filled array if no ids are passed to getAllStats',
done => {
statsModel.getAllStats(fakeLogger, [], (err, res) => {
assert.ifError(err);
assert.deepStrictEqual(res.requests, setExpectedStats([]));
assert.deepStrictEqual(res['500s'], setExpectedStats([]));
done();
});
});
it('should get accurately reported data for given id from getAllStats',
done => {
statsModel.reportNewRequest(id, 9);
statsModel.reportNewRequest(id2, 2);
statsModel.reportNewRequest(id3, 3);
statsModel.report500(id);
async.series([
next => {
statsModel.getAllStats(fakeLogger, [id], (err, res) => {
assert.ifError(err);
assert.equal(res.requests[0], 9);
assert.equal(res['500s'][0], 1);
next();
});
},
next => {
statsModel.getAllStats(fakeLogger, [id, id2, id3],
(err, res) => {
assert.ifError(err);
assert.equal(res.requests[0], 14);
assert.deepStrictEqual(res.requests,
setExpectedStats([14]));
next();
});
},
], done);
});
it('should normalize to the start of the hour using normalizeTimestampByHour',
() => {
const date = new Date('2018-09-13T23:30:59.195Z');
const newDate = new Date(statsModel.normalizeTimestampByHour(date));
assert.strictEqual(date.getHours(), newDate.getHours());
assert.strictEqual(newDate.getMinutes(), 0);
assert.strictEqual(newDate.getSeconds(), 0);
assert.strictEqual(newDate.getMilliseconds(), 0);
});
it('should get previous hour using _getDatePreviousHour', () => {
const date = new Date('2018-09-13T23:30:59.195Z');
const newDate = statsModel._getDatePreviousHour(new Date(date));
const millisecondsInOneHour = 3600000;
assert.strictEqual(date - newDate, millisecondsInOneHour);
});
it('should get an array of hourly timestamps using getSortedSetHours',
() => {
const epoch = 1536882476501;
const millisecondsInOneHour = 3600000;
const expected = [];
let dateInMilliseconds = statsModel.normalizeTimestampByHour(
new Date(epoch));
for (let i = 0; i < 24; i++) {
expected.push(dateInMilliseconds);
dateInMilliseconds -= millisecondsInOneHour;
}
const res = statsModel.getSortedSetHours(epoch);
assert.deepStrictEqual(res, expected);
});
it('should apply TTL on a new sorted set using addToSortedSet', done => {
const key = 'a-test-key';
const score = 100;
const value = 'a-value';
const now = Date.now();
const nearestHour = statsModel.normalizeTimestampByHour(new Date(now));
statsModel.addToSortedSet(key, score, value, (err, res) => {
assert.ifError(err);
// check both a "zadd" and "expire" occurred
assert.equal(res, 1);
redisClient.ttl(key, (err, res) => {
assert.ifError(err);
// assert this new set has a ttl applied
assert(res > 0);
const adjustmentSecs = now - nearestHour;
const msInADay = 24 * 60 * 60 * 1000;
const msInAnHour = 60 * 60 * 1000;
const upperLimitSecs =
Math.ceil((msInADay - adjustmentSecs) / 1000);
const lowerLimitSecs =
Math.floor((msInADay - adjustmentSecs - msInAnHour) / 1000);
// assert new ttl is between 23 and 24 hours adjusted by time
// elapsed since normalized hourly time
assert(res >= lowerLimitSecs);
assert(res <= upperLimitSecs);
done();
});
});
});
});
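// Worked instance of the TTL bounds asserted above, assuming an invented elapsed time of
// 20 minutes past the normalized hour (adjustmentSecs is in milliseconds despite its name):
//     adjustmentSecs = 1200000
//     upperLimitSecs = Math.ceil((86400000 - 1200000) / 1000)             = 85200  (~23h40m)
//     lowerLimitSecs = Math.floor((86400000 - 1200000 - 3600000) / 1000)  = 81600  (~22h40m)
// so the asserted window is roughly 23-24 hours, shifted back by the elapsed offset.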

View File

@ -269,33 +269,4 @@ describe('v4 headerAuthCheck', () => {
assert.strictEqual(res.params.version, 4); assert.strictEqual(res.params.version, 4);
done(); done();
}); });
it('should not return error if proxy_path header is added', done => {
// Freezes time so date created within function will be Feb 8, 2016
const clock = lolex.install(1454962445000);
/* eslint-disable camelcase */
const alteredRequest = createAlteredRequest({
proxy_path: 'proxy/1234' }, 'headers', request, headers);
/* eslint-enable camelcase */
const res = headerAuthCheck(alteredRequest, log);
clock.uninstall();
assert.strictEqual(res.err, null);
done();
});
it('should return InvalidRequest error if proxy_path header is invalid',
done => {
// Freezes time so date created within function will be Feb 8, 2016
const clock = lolex.install(1454962445000);
/* eslint-disable camelcase */
const alteredRequest = createAlteredRequest({
proxy_path: 'absc%2proxy/1234' }, 'headers', request, headers);
/* eslint-enable camelcase */
const res = headerAuthCheck(alteredRequest, log);
clock.uninstall();
assert.deepStrictEqual(res.err,
errors.InvalidArgument.customizeDescription(
'invalid proxy_path header'));
done();
});
}); });

View File

@ -225,34 +225,4 @@ describe('v4 queryAuthCheck', () => {
assert.strictEqual(res.params.version, 4); assert.strictEqual(res.params.version, 4);
done(); done();
}); });
it('should successfully return no error if proxy_path header is added',
done => {
// Freezes time so date created within function will be Feb 8, 2016
const clock = lolex.install(1454974984001);
/* eslint-disable camelcase */
const alteredRequest = createAlteredRequest({ proxy_path:
'proxy/1234' }, 'headers', request, query);
/* eslint-enable camelcase */
const res = queryAuthCheck(alteredRequest, log, alteredRequest.query);
clock.uninstall();
assert.deepStrictEqual(res.err, null);
done();
});
it('should return InvalidRequest error if proxy_path header is invalid',
done => {
// Freezes time so date created within function will be Feb 8, 2016
const clock = lolex.install(1454974984001);
/* eslint-disable camelcase */
const alteredRequest = createAlteredRequest({ proxy_path:
'absc%2proxy/1234' }, 'headers', request, query);
/* eslint-enable camelcase */
const res = queryAuthCheck(alteredRequest, log, alteredRequest.query);
clock.uninstall();
assert.deepStrictEqual(res.err,
errors.InvalidArgument.customizeDescription(
'invalid proxy_path header'));
done();
});
}); });

View File

@ -59,7 +59,6 @@ const testWebsiteConfiguration = new WebsiteConfiguration({
}); });
const testLocationConstraint = 'us-west-1'; const testLocationConstraint = 'us-west-1';
const testReadLocationConstraint = 'us-west-2';
const testCorsConfiguration = [ const testCorsConfiguration = [
{ id: 'test', { id: 'test',
@ -116,8 +115,6 @@ const testLifecycleConfiguration = {
}, },
], ],
}; };
const testUid = '99ae3446-7082-4c17-ac97-52965dc004ec';
// create a dummy bucket to test getters and setters // create a dummy bucket to test getters and setters
Object.keys(acl).forEach( Object.keys(acl).forEach(
@ -135,8 +132,7 @@ Object.keys(acl).forEach(
testWebsiteConfiguration, testWebsiteConfiguration,
testCorsConfiguration, testCorsConfiguration,
testReplicationConfiguration, testReplicationConfiguration,
testLifecycleConfiguration, testLifecycleConfiguration);
testUid, undefined, true);
describe('serialize/deSerialize on BucketInfo class', () => { describe('serialize/deSerialize on BucketInfo class', () => {
const serialized = dummyBucket.serialize(); const serialized = dummyBucket.serialize();
@ -155,7 +151,6 @@ Object.keys(acl).forEach(
versioningConfiguration: versioningConfiguration:
dummyBucket._versioningConfiguration, dummyBucket._versioningConfiguration,
locationConstraint: dummyBucket._locationConstraint, locationConstraint: dummyBucket._locationConstraint,
readLocationConstraint: dummyBucket._readLocationConstraint,
websiteConfiguration: dummyBucket._websiteConfiguration websiteConfiguration: dummyBucket._websiteConfiguration
.getConfig(), .getConfig(),
cors: dummyBucket._cors, cors: dummyBucket._cors,
@ -163,8 +158,6 @@ Object.keys(acl).forEach(
dummyBucket._replicationConfiguration, dummyBucket._replicationConfiguration,
lifecycleConfiguration: lifecycleConfiguration:
dummyBucket._lifecycleConfiguration, dummyBucket._lifecycleConfiguration,
uid: dummyBucket._uid,
isNFS: dummyBucket._isNFS,
}; };
assert.strictEqual(serialized, JSON.stringify(bucketInfos)); assert.strictEqual(serialized, JSON.stringify(bucketInfos));
done(); done();
@ -189,7 +182,6 @@ Object.keys(acl).forEach(
'string'); 'string');
assert.strictEqual(typeof dummyBucket.getCreationDate(), assert.strictEqual(typeof dummyBucket.getCreationDate(),
'string'); 'string');
assert.strictEqual(typeof dummyBucket.getUid(), 'string');
}); });
it('this should have the right acl\'s types', () => { it('this should have the right acl\'s types', () => {
assert.strictEqual(typeof dummyBucket.getAcl(), 'object'); assert.strictEqual(typeof dummyBucket.getAcl(), 'object');
@ -257,18 +249,6 @@ Object.keys(acl).forEach(
assert.deepStrictEqual(dummyBucket.getLocationConstraint(), assert.deepStrictEqual(dummyBucket.getLocationConstraint(),
testLocationConstraint); testLocationConstraint);
}); });
it('getReadLocationConstraint should return locationConstraint ' +
'if readLocationConstraint hasn\'t been set', () => {
assert.deepStrictEqual(dummyBucket.getReadLocationConstraint(),
testLocationConstraint);
});
it('getReadLocationConstraint should return readLocationConstraint',
() => {
dummyBucket._readLocationConstraint =
testReadLocationConstraint;
assert.deepStrictEqual(dummyBucket.getReadLocationConstraint(),
testReadLocationConstraint);
});
it('getCors should return CORS configuration', () => { it('getCors should return CORS configuration', () => {
assert.deepStrictEqual(dummyBucket.getCors(), assert.deepStrictEqual(dummyBucket.getCors(),
testCorsConfiguration); testCorsConfiguration);
@ -277,16 +257,6 @@ Object.keys(acl).forEach(
assert.deepStrictEqual(dummyBucket.getLifecycleConfiguration(), assert.deepStrictEqual(dummyBucket.getLifecycleConfiguration(),
testLifecycleConfiguration); testLifecycleConfiguration);
}); });
it('getUid should return unique id of bucket', () => {
assert.deepStrictEqual(dummyBucket.getUid(), testUid);
});
it('isNFS should return whether bucket is on NFS', () => {
assert.deepStrictEqual(dummyBucket.isNFS(), true);
});
it('setIsNFS should set whether bucket is on NFS', () => {
dummyBucket.setIsNFS(false);
assert.deepStrictEqual(dummyBucket.isNFS(), false);
});
}); });
describe('setters on BucketInfo class', () => { describe('setters on BucketInfo class', () => {
@ -358,7 +328,8 @@ Object.keys(acl).forEach(
protocol: 'https', protocol: 'https',
}, },
}; };
dummyBucket.setWebsiteConfiguration(newWebsiteConfiguration); dummyBucket
.setWebsiteConfiguration(newWebsiteConfiguration);
assert.deepStrictEqual(dummyBucket.getWebsiteConfiguration(), assert.deepStrictEqual(dummyBucket.getWebsiteConfiguration(),
newWebsiteConfiguration); newWebsiteConfiguration);
}); });
@ -410,26 +381,3 @@ Object.keys(acl).forEach(
}); });
}) })
); );
describe('uid default', () => {
it('should set uid if none is specified by constructor params', () => {
const dummyBucket = new BucketInfo(
bucketName, owner, ownerDisplayName, testDate,
BucketInfo.currentModelVersion(), acl[emptyAcl],
false, false, {
cryptoScheme: 1,
algorithm: 'sha1',
masterKeyId: 'somekey',
mandatory: true,
}, testVersioningConfiguration,
testLocationConstraint,
testWebsiteConfiguration,
testCorsConfiguration,
testReplicationConfiguration,
testLifecycleConfiguration);
const defaultUid = dummyBucket.getUid();
assert(defaultUid);
assert.strictEqual(defaultUid.length, 36);
});
});

View File

@ -175,10 +175,6 @@ function generateFilter(errorTag, tagObj) {
middleTags = '<Prefix>foo</Prefix><Prefix>bar</Prefix>' + middleTags = '<Prefix>foo</Prefix><Prefix>bar</Prefix>' +
`<Prefix>${tagObj.lastPrefix}</Prefix>`; `<Prefix>${tagObj.lastPrefix}</Prefix>`;
} }
if (tagObj.label === 'mult-tags') {
middleTags = '<And><Tag><Key>color</Key><Value>blue</Value></Tag>' +
'<Tag><Key>shape</Key><Value>circle</Value></Tag></And>';
}
Filter = `<Filter>${middleTags}</Filter>`; Filter = `<Filter>${middleTags}</Filter>`;
if (tagObj.label === 'also-prefix') { if (tagObj.label === 'also-prefix') {
Filter = '<Filter></Filter><Prefix></Prefix>'; Filter = '<Filter></Filter><Prefix></Prefix>';
@ -353,16 +349,4 @@ describe('LifecycleConfiguration class getLifecycleConfiguration', () => {
done(); done();
}); });
}); });
it('should apply all unique Key tags if multiple tags included', done => {
tagObj.label = 'mult-tags';
generateParsedXml('Filter', tagObj, parsedXml => {
const lcConfig = new LifecycleConfiguration(parsedXml).
getLifecycleConfiguration();
const expected = [{ key: 'color', val: 'blue' },
{ key: 'shape', val: 'circle' }];
assert.deepStrictEqual(expected, lcConfig.rules[0].filter.tags);
done();
});
});
}); });

View File

@ -1,74 +0,0 @@
const assert = require('assert');
const { parseString } = require('xml2js');
const werelogs = require('werelogs');
const ReplicationConfiguration =
require('../../../lib/models/ReplicationConfiguration');
const logger = new werelogs.Logger('test:ReplicationConfiguration');
const mockedConfig = {
replicationEndpoints: [{
type: 'scality',
site: 'ring',
default: true,
}, {
type: 'aws_s3',
site: 'awsbackend',
}, {
type: 'gcp',
site: 'gcpbackend',
}, {
type: 'azure',
site: 'azurebackend',
}],
};
function getXMLConfig(hasPreferredRead) {
return `
<ReplicationConfiguration>
<Role>arn:aws:iam::root:role/s3-replication-role</Role>
<Rule>
<ID>Replication-Rule-1</ID>
<Status>Enabled</Status>
<Prefix>someprefix/</Prefix>
<Destination>
<Bucket>arn:aws:s3:::destbucket</Bucket>
<StorageClass>awsbackend,` +
`gcpbackend${hasPreferredRead ? ':preferred_read' : ''},azurebackend` +
`</StorageClass>
</Destination>
</Rule>
</ReplicationConfiguration>
`;
}
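// For clarity, the StorageClass element built above is a comma-separated list of backend
// sites, with ':preferred_read' appended to the preferred read location:
//     getXMLConfig(true)  -> <StorageClass>awsbackend,gcpbackend:preferred_read,azurebackend</StorageClass>
//     getXMLConfig(false) -> <StorageClass>awsbackend,gcpbackend,azurebackend</StorageClass>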
describe('ReplicationConfiguration class', () => {
it('should parse replication config XML without preferred read', done => {
const repConfigXML = getXMLConfig(false);
parseString(repConfigXML, (err, parsedXml) => {
assert.ifError(err);
const repConf = new ReplicationConfiguration(
parsedXml, logger, mockedConfig);
const repConfErr = repConf.parseConfiguration();
assert.ifError(repConfErr);
assert.strictEqual(repConf.getPreferredReadLocation(), null);
done();
});
});
it('should parse replication config XML with preferred read', done => {
const repConfigXML = getXMLConfig(true);
parseString(repConfigXML, (err, parsedXml) => {
assert.ifError(err);
const repConf = new ReplicationConfiguration(
parsedXml, logger, mockedConfig);
const repConfErr = repConf.parseConfiguration();
assert.ifError(repConfErr);
assert.strictEqual(repConf.getPreferredReadLocation(),
'gcpbackend');
done();
});
});
});

View File

@ -82,7 +82,6 @@ describe('ObjectMD class setters/getters', () => {
role: '', role: '',
storageType: '', storageType: '',
dataStoreVersionId: '', dataStoreVersionId: '',
isNFS: null,
}], }],
['ReplicationInfo', { ['ReplicationInfo', {
status: 'PENDING', status: 'PENDING',
@ -98,11 +97,8 @@ describe('ObjectMD class setters/getters', () => {
'arn:aws:iam::account-id:role/dest-resource', 'arn:aws:iam::account-id:role/dest-resource',
storageType: 'aws_s3', storageType: 'aws_s3',
dataStoreVersionId: '', dataStoreVersionId: '',
isNFS: null,
}], }],
['DataStoreName', null, ''], ['DataStoreName', null, ''],
['ReplicationIsNFS', null, null],
['ReplicationIsNFS', true],
].forEach(test => { ].forEach(test => {
const property = test[0]; const property = test[0];
const testValue = test[1]; const testValue = test[1];
@ -196,15 +192,6 @@ describe('ObjectMD class setters/getters', () => {
assert.strictEqual( assert.strictEqual(
md.getReplicationSiteDataStoreVersionId('zenko'), 'a'); md.getReplicationSiteDataStoreVersionId('zenko'), 'a');
}); });
it('ObjectMd::isMultipartUpload', () => {
md.setContentMd5('68b329da9893e34099c7d8ad5cb9c940');
assert.strictEqual(md.isMultipartUpload(), false);
md.setContentMd5('741e0f4bad5b093044dc54a74d911094-1');
assert.strictEqual(md.isMultipartUpload(), true);
md.setContentMd5('bda0c0bed89c8bdb9e409df7ae7073c5-9876');
assert.strictEqual(md.isMultipartUpload(), true);
});
}); });
describe('ObjectMD import from stored blob', () => { describe('ObjectMD import from stored blob', () => {

View File

@ -1,170 +0,0 @@
const assert = require('assert');
const HealthProbeServer =
require('../../../../lib/network/probe/HealthProbeServer');
const http = require('http');
function makeRequest(meth, uri) {
const params = {
hostname: 'localhost',
port: 4042,
method: meth,
path: uri,
};
const req = http.request(params);
req.setNoDelay(true);
return req;
}
const endpoints = [
'/_/health/liveness',
'/_/health/readiness',
];
const badEndpoints = [
'/_/health/liveness_thisiswrong',
'/_/health/readiness_thisiswrong',
];
describe('network.probe.HealthProbeServer', () => {
describe('service is "up"', () => {
let server;
function setup(done) {
server = new HealthProbeServer({ port: 4042 });
server.start();
done();
}
before(done => {
setup(done);
});
after(done => {
server.stop();
done();
});
endpoints.forEach(ep => {
it('should perform a GET and ' +
'return 200 OK', done => {
makeRequest('GET', ep)
.on('response', res => {
assert(res.statusCode === 200);
done();
})
.on('error', err => {
assert.ifError(err);
done();
}).end();
});
});
});
describe('service is "down"', () => {
let server;
function setup(done) {
function falseStub() {
return false;
}
server = new HealthProbeServer({
port: 4042,
livenessCheck: falseStub,
readinessCheck: falseStub,
});
server.start();
done();
}
before(done => {
setup(done);
});
after(done => {
server.stop();
done();
});
endpoints.forEach(ep => {
it('should perform a GET and ' +
'return 503 ServiceUnavailable', done => {
makeRequest('GET', ep)
.on('response', res => {
assert(res.statusCode === 503);
done();
})
.on('error', err => {
assert.ifError(err);
done();
}).end();
});
});
});
describe('Invalid Methods', () => {
let server;
function setup(done) {
server = new HealthProbeServer({
port: 4042,
});
server.start();
done();
}
before(done => {
setup(done);
});
after(done => {
server.stop();
done();
});
endpoints.forEach(ep => {
it('should perform a POST and ' +
'return 405 MethodNotAllowed', done => {
makeRequest('POST', ep)
.on('response', res => {
assert(res.statusCode === 405);
done();
})
.on('error', err => {
assert.ifError(err);
done();
}).end();
});
});
});
describe('Invalid URI', () => {
let server;
function setup(done) {
server = new HealthProbeServer({
port: 4042,
});
server.start();
done();
}
before(done => {
setup(done);
});
after(done => {
server.stop();
done();
});
badEndpoints.forEach(ep => {
it('should perform a GET and ' +
'return 400 InvalidURI', done => {
makeRequest('GET', ep)
.on('response', res => {
assert(res.statusCode === 400);
done();
})
.on('error', err => {
assert.ifError(err);
done();
}).end();
});
});
});
});

View File

@ -4,9 +4,8 @@ const assert = require('assert');
const policyValidator = require('../../../lib/policy/policyValidator'); const policyValidator = require('../../../lib/policy/policyValidator');
const errors = require('../../../lib/errors'); const errors = require('../../../lib/errors');
const validateUserPolicy = policyValidator.validateUserPolicy; const validateUserPolicy = policyValidator.validateUserPolicy;
const validateResourcePolicy = policyValidator.validateResourcePolicy;
const successRes = { error: null, valid: true }; const successRes = { error: null, valid: true };
const sampleUserPolicy = { const samplePolicy = {
Version: '2012-10-17', Version: '2012-10-17',
Statement: { Statement: {
Sid: 'FooBar1234', Sid: 'FooBar1234',
@ -16,19 +15,6 @@ const sampleUserPolicy = {
Condition: { NumericLessThanEquals: { 's3:max-keys': '10' } }, Condition: { NumericLessThanEquals: { 's3:max-keys': '10' } },
}, },
}; };
const sampleResourcePolicy = {
Version: '2012-10-17',
Statement: [
{
Sid: 'ResourcePolicy1',
Effect: 'Allow',
Action: 's3:ListBucket',
Resource: 'arn:aws:s3:::example-bucket',
Condition: { StringLike: { 's3:prefix': 'foo' } },
Principal: '*',
},
],
};
const errDict = { const errDict = {
required: { required: {
@ -44,84 +30,45 @@ const errDict = {
Resource: 'Policy statement must contain resources.', Resource: 'Policy statement must contain resources.',
}, },
}; };
let policy;
function failRes(policyType, errDescription) { function failRes(errDescription) {
let error; const error = Object.assign({}, errors.MalformedPolicyDocument);
if (policyType === 'user') {
error = Object.assign({}, errors.MalformedPolicyDocument);
}
if (policyType === 'resource') {
error = Object.assign({}, errors.MalformedPolicy);
}
error.description = errDescription || error.description; error.description = errDescription || error.description;
return { error, valid: false }; return { error, valid: false };
} }
function check(input, expected, policyType) { function check(input, expected) {
let result; const result = validateUserPolicy(JSON.stringify(input));
if (policyType === 'user') {
result = validateUserPolicy(JSON.stringify(input));
}
if (policyType === 'resource') {
result = validateResourcePolicy(JSON.stringify(input));
}
assert.deepStrictEqual(result, expected); assert.deepStrictEqual(result, expected);
} }
let userPolicy;
let resourcePolicy;
const user = 'user';
const resource = 'resource';
beforeEach(() => { beforeEach(() => {
userPolicy = JSON.parse(JSON.stringify(sampleUserPolicy)); policy = JSON.parse(JSON.stringify(samplePolicy));
resourcePolicy = JSON.parse(JSON.stringify(sampleResourcePolicy));
}); });
describe('Policies validation - Invalid JSON', () => { describe('Policies validation - Invalid JSON', () => {
it('should return error for invalid user policy JSON', () => { it('should return error for invalid JSON', () => {
const result = validateUserPolicy('{"Version":"2012-10-17",' + const result = validateUserPolicy('{"Version":"2012-10-17",' +
'"Statement":{"Effect":"Allow""Action":"s3:PutObject",' + '"Statement":{"Effect":"Allow""Action":"s3:PutObject",' +
'"Resource":"arn:aws:s3*"}}'); '"Resource":"arn:aws:s3*"}}');
assert.deepStrictEqual(result, failRes(user)); assert.deepStrictEqual(result, failRes());
});
it('should return error for invalid resource policy JSON', () => {
const result = validateResourcePolicy('{"Version":"2012-10-17",' +
'"Statement":{"Effect":"Allow""Action":"s3:PutObject",' +
'"Resource":"arn:aws:s3*"}}');
assert.deepStrictEqual(result, failRes(resource));
}); });
}); });
describe('Policies validation - Version', () => { describe('Policies validation - Version', () => {
it('should validate user policy with version date 2012-10-17', () => { it('should validate with version date 2012-10-17', () => {
check(userPolicy, successRes, user); check(policy, successRes);
}); });
it('should validate resource policy with version date 2012-10-17', () => { it('should return error for other dates', () => {
check(resourcePolicy, successRes, 'resource'); policy.Version = '2012-11-17';
check(policy, failRes());
}); });
it('user policy should return error for other dates', () => { it('should return error if Version field is missing', () => {
userPolicy.Version = '2012-11-17'; policy.Version = undefined;
check(userPolicy, failRes(user), user); check(policy, failRes(errDict.required.Version));
});
it('resource policy should return error for other dates', () => {
resourcePolicy.Version = '2012-11-17';
check(resourcePolicy, failRes(resource), resource);
});
it('should return error if Version field in user policy is missing', () => {
userPolicy.Version = undefined;
check(userPolicy, failRes(user, errDict.required.Version), user);
});
it('should return error if Version field in resource policy is missing',
() => {
resourcePolicy.Version = undefined;
check(resourcePolicy, failRes(resource, errDict.required.Version),
resource);
}); });
}); });
@ -130,24 +77,20 @@ describe('Policies validation - Principal', () => {
{ {
name: 'an account id', name: 'an account id',
value: { AWS: '111111111111' }, value: { AWS: '111111111111' },
policyType: [user, resource],
}, },
{ {
name: 'anonymous user AWS form', name: 'anonymous user AWS form',
value: { AWS: '*' }, value: { AWS: '*' },
policyType: [user, resource],
}, },
{ {
name: 'an account arn', name: 'an account arn',
value: { AWS: 'arn:aws:iam::111111111111:root' }, value: { AWS: 'arn:aws:iam::111111111111:root' },
policyType: [user, resource],
}, },
{ {
name: 'multiple account id', name: 'multiple account id',
value: { value: {
AWS: ['111111111111', '111111111112'], AWS: ['111111111111', '111111111112'],
}, },
policyType: [user, resource],
}, },
{ {
name: 'multiple account arn', name: 'multiple account arn',
@ -157,17 +100,14 @@ describe('Policies validation - Principal', () => {
'arn:aws:iam::111111111112:root', 'arn:aws:iam::111111111112:root',
], ],
}, },
policyType: [user, resource],
}, },
{ {
name: 'anonymous user as string', name: 'anonymous user as string',
value: '*', value: '*',
policyType: [user, resource],
}, },
{ {
name: 'user arn', name: 'user arn',
value: { AWS: 'arn:aws:iam::111111111111:user/alex' }, value: { AWS: 'arn:aws:iam::111111111111:user/alex' },
policyType: [user, resource],
}, },
{ {
name: 'multiple user arns', name: 'multiple user arns',
@ -177,14 +117,12 @@ describe('Policies validation - Principal', () => {
'arn:aws:iam::111111111111:user/thibault', 'arn:aws:iam::111111111111:user/thibault',
], ],
}, },
policyType: [user, resource],
}, },
{ {
name: 'role arn', name: 'role arn',
value: { value: {
AWS: 'arn:aws:iam::111111111111:role/dev', AWS: 'arn:aws:iam::111111111111:role/dev',
}, },
policyType: [user, resource],
}, },
{ {
name: 'multiple role arn', name: 'multiple role arn',
@ -194,7 +132,6 @@ describe('Policies validation - Principal', () => {
'arn:aws:iam::111111111111:role/prod', 'arn:aws:iam::111111111111:role/prod',
], ],
}, },
policyType: [user, resource],
}, },
{ {
name: 'saml provider', name: 'saml provider',
@ -202,84 +139,57 @@ describe('Policies validation - Principal', () => {
Federated: Federated:
'arn:aws:iam::111111111111:saml-provider/mysamlprovider', 'arn:aws:iam::111111111111:saml-provider/mysamlprovider',
}, },
policyType: [user],
}, },
{ {
name: 'with backbeat service', name: 'with backbeat service',
value: { Service: 'backbeat' }, value: { Service: 'backbeat' },
policyType: [user, resource],
},
{
name: 'with canonical user id',
value: { CanonicalUser:
'1examplecanonicalid12345678909876' +
'54321qwerty12345asdfg67890z1x2c' },
policyType: [resource],
}, },
].forEach(test => { ].forEach(test => {
if (test.policyType.includes(user)) { it(`should allow principal field with ${test.name}`, () => {
it(`should allow user policy principal field with ${test.name}`, policy.Statement.Principal = test.value;
() => { delete policy.Statement.Resource;
userPolicy.Statement.Principal = test.value; check(policy, successRes);
delete userPolicy.Statement.Resource;
check(userPolicy, successRes, user);
}); });
it(`should allow user policy notPrincipal field with ${test.name}`, it(`should allow notPrincipal field with ${test.name}`, () => {
() => { policy.Statement.NotPrincipal = test.value;
userPolicy.Statement.NotPrincipal = test.value; delete policy.Statement.Resource;
delete userPolicy.Statement.Resource; check(policy, successRes);
check(userPolicy, successRes, user);
}); });
}
if (test.policyType.includes(resource)) {
it(`should allow resource policy principal field with ${test.name}`,
() => {
resourcePolicy.Statement[0].Principal = test.value;
check(resourcePolicy, successRes, resource);
});
}
}); });
[ [
{ {
name: 'wrong format account id', name: 'wrong format account id',
value: { AWS: '11111111111z' }, value: { AWS: '11111111111z' },
policyType: [user, resource],
}, },
{ {
name: 'empty string', name: 'empty string',
value: '', value: '',
policyType: [user, resource],
}, },
{ {
name: 'anonymous user federated form', name: 'anonymous user federated form',
value: { federated: '*' }, value: { federated: '*' },
policyType: [user, resource],
}, },
{ {
name: 'wildcard in resource', name: 'wildcard in ressource',
value: { AWS: 'arn:aws:iam::111111111111:user/*' }, value: { AWS: 'arn:aws:iam::111111111111:user/*' },
policyType: [user, resource],
}, },
{ {
name: 'a malformed account arn', name: 'a malformed account arn',
value: { AWS: 'arn:aws:iam::111111111111:' }, value: { AWS: 'arn:aws:iam::111111111111:' },
policyType: [user, resource],
}, },
{ {
name: 'multiple malformed account id', name: 'multiple malformed account id',
value: { value: {
AWS: ['1111111111z1', '1111z1111112'], AWS: ['1111111111z1', '1111z1111112'],
}, },
policyType: [user, resource],
}, },
{ {
name: 'multiple anonymous', name: 'multiple anonymous',
value: { value: {
AWS: ['*', '*'], AWS: ['*', '*'],
}, },
policyType: [user, resource],
}, },
{ {
name: 'multiple malformed account arn', name: 'multiple malformed account arn',
@ -289,22 +199,18 @@ describe('Policies validation - Principal', () => {
'arn:aws:iam::111111111112:', 'arn:aws:iam::111111111112:',
], ],
}, },
policyType: [user, resource],
}, },
{ {
name: 'account id as a string', name: 'account id as a string',
value: '111111111111', value: '111111111111',
policyType: [user, resource],
}, },
{ {
name: 'account arn as a string', name: 'account arn as a string',
value: 'arn:aws:iam::111111111111:root', value: 'arn:aws:iam::111111111111:root',
policyType: [user, resource],
}, },
{ {
name: 'user arn as a string', name: 'user arn as a string',
value: 'arn:aws:iam::111111111111:user/alex', value: 'arn:aws:iam::111111111111:user/alex',
policyType: [user, resource],
}, },
{ {
name: 'multiple malformed user arns', name: 'multiple malformed user arns',
@ -314,14 +220,12 @@ describe('Policies validation - Principal', () => {
'arn:aws:iam::111111111111:user/', 'arn:aws:iam::111111111111:user/',
], ],
}, },
policyType: [user, resource],
}, },
{ {
name: 'malformed role arn', name: 'malformed role arn',
value: { value: {
AWS: 'arn:aws:iam::111111111111:role/', AWS: 'arn:aws:iam::111111111111:role/',
}, },
policyType: [user, resource],
}, },
{ {
name: 'multiple malformed role arn', name: 'multiple malformed role arn',
@ -331,84 +235,36 @@ describe('Policies validation - Principal', () => {
'arn:aws:iam::11111111z111:role/prod', 'arn:aws:iam::11111111z111:role/prod',
], ],
}, },
policyType: [user, resource],
}, },
{ {
name: 'saml provider as a string', name: 'saml provider as a string',
value: 'arn:aws:iam::111111111111:saml-provider/mysamlprovider', value: 'arn:aws:iam::111111111111:saml-provider/mysamlprovider',
policyType: [user],
}, },
{ {
name: 'with other service than backbeat', name: 'with other service than backbeat',
value: { Service: 'non-existent-service' }, value: { Service: 'non-existent-service' },
policyType: [user, resource],
},
{
name: 'invalid canonical user',
value: { CanonicalUser:
'12345invalid-canonical-id$$$//098' +
'7654321poiu1q2w3e4r5t6y7u8i9o0p' },
policyType: [resource],
}, },
].forEach(test => { ].forEach(test => {
if (test.policyType.includes(user)) { it(`should fail with ${test.name}`, () => {
it(`user policy should fail with ${test.name}`, () => { policy.Statement.Principal = test.value;
userPolicy.Statement.Principal = test.value; delete policy.Statement.Resource;
delete userPolicy.Statement.Resource; check(policy, failRes());
check(userPolicy, failRes(user), user);
}); });
}
if (test.policyType.includes(resource)) {
it(`resource policy should fail with ${test.name}`, () => {
resourcePolicy.Statement[0].Principal = test.value;
check(resourcePolicy, failRes(resource), resource);
});
}
}); });
it('should not allow Resource field', () => { it('should not allow Resource field', () => {
userPolicy.Statement.Principal = '*'; policy.Statement.Principal = '*';
check(userPolicy, failRes(user), user); check(policy, failRes());
}); });
}); });
describe('Policies validation - Statement', () => { describe('Policies validation - Statement', () => {
[ it('should succeed for a valid object', () => {
{ check(policy, successRes);
name: 'should return error for undefined',
value: undefined,
},
{
name: 'should return an error for an empty list',
value: [],
},
{
name: 'should return an error for an empty object',
value: {},
errMessage: errDict.required.Action,
},
].forEach(test => {
it(`user policy ${test.name}`, () => {
userPolicy.Statement = test.value;
check(userPolicy, failRes(user, test.errMessage), user);
}); });
it(`resource policy ${test.name}`, () => { it('should succeed for a valid array', () => {
resourcePolicy.Statement = test.value; policy.Statement = [
check(resourcePolicy, failRes(resource, test.errMessage), resource);
});
});
it('user policy should succeed for a valid object', () => {
check(userPolicy, successRes, user);
});
it('resource policy should succeed for a valid object', () => {
check(resourcePolicy, successRes, resource);
});
it('user policy should succeed for a valid object', () => {
userPolicy.Statement = [
{ {
Effect: 'Allow', Effect: 'Allow',
Action: 's3:PutObject', Action: 's3:PutObject',
@ -420,373 +276,255 @@ describe('Policies validation - Statement', () => {
Resource: 'arn:aws:s3:::my_bucket/uploads/widgetco/*', Resource: 'arn:aws:s3:::my_bucket/uploads/widgetco/*',
}, },
]; ];
check(userPolicy, successRes, user); check(policy, successRes);
}); });
it('resource policy should succeed for a valid object', () => { it('should return an error for undefined', () => {
resourcePolicy.Statement = [ policy.Statement = undefined;
{ check(policy, failRes());
Effect: 'Allow',
Action: 's3:PutObject',
Resource: 'arn:aws:s3:::my_bucket/uploads/widgetco/*',
Principal: '*',
},
{
Effect: 'Deny',
Action: 's3:DeleteObject',
Resource: 'arn:aws:s3:::my_bucket/uploads/widgetco/*',
Principal: '*',
},
];
check(resourcePolicy, successRes, resource);
}); });
[ it('should return an error for an empty list', () => {
{ policy.Statement = [];
name: 'should return error for missing a required field - Action', check(policy, failRes());
toDelete: ['Action'],
expected: 'fail',
errMessage: errDict.required.Action,
},
{
name: 'should return error for missing a required field - Effect',
toDelete: ['Effect'],
expected: 'fail',
},
{
name: 'should return error for missing required field - Resource',
toDelete: ['Resource'],
expected: 'fail',
},
{
name: 'should return error for missing multiple required fields',
toDelete: ['Effect', 'Resource'],
expected: 'fail',
},
{
name: 'should succeed w optional fields missing - Sid, Condition',
toDelete: ['Sid', 'Condition'],
expected: successRes,
},
].forEach(test => {
it(`user policy ${test.name}`, () => {
test.toDelete.forEach(p => delete userPolicy.Statement[p]);
if (test.expected === 'fail') {
check(userPolicy, failRes(user, test.errMessage), user);
} else {
check(userPolicy, test.expected, user);
}
}); });
it(`resource policy ${test.name}`, () => { it('should return an error for an empty object', () => {
test.toDelete.forEach(p => delete resourcePolicy.Statement[0][p]); policy.Statement = {};
if (test.expected === 'fail') { check(policy, failRes(errDict.required.Action));
check(resourcePolicy, failRes(resource, test.errMessage),
resource);
} else {
check(resourcePolicy, test.expected, resource);
}
}); });
it('should return an error for missing a required field - Action', () => {
delete policy.Statement.Action;
check(policy, failRes(errDict.required.Action));
});
it('should return an error for missing a required field - Effect', () => {
delete policy.Statement.Effect;
check(policy, failRes());
});
it('should return an error for missing a required field - Resource', () => {
delete policy.Statement.Resource;
check(policy, failRes());
});
it('should return an error for missing multiple required fields', () => {
delete policy.Statement.Effect;
delete policy.Statement.Resource;
check(policy, failRes());
});
it('should succeed with optional fields missing - Sid, Condition', () => {
delete policy.Statement.Sid;
delete policy.Statement.Condition;
check(policy, successRes);
}); });
}); });
describe('Policies validation - Statement::Sid_block', () => { describe('Policies validation - Statement::Sid_block', () => {
it('user policy should succeed if Sid is any alphanumeric string', () => { it('should succeed if Sid is any alphanumeric string', () => {
check(userPolicy, successRes, user); check(policy, successRes);
}); });
it('resource policy should succeed if Sid is any alphanumeric string', it('should fail if Sid is not a valid format', () => {
() => { policy.Statement.Sid = 'foo bar()';
check(resourcePolicy, successRes, resource); check(policy, failRes());
}); });
it('user policy should fail if Sid is not a valid format', () => { it('should fail if Sid is not a string', () => {
userPolicy.Statement.Sid = 'foo bar()'; policy.Statement.Sid = 1234;
check(userPolicy, failRes(user), user); check(policy, failRes());
});
it('resource policy should fail if Sid is not a valid format', () => {
resourcePolicy.Statement[0].Sid = 'foo bar()';
check(resourcePolicy, failRes(resource), resource);
});
it('user policy should fail if Sid is not a string', () => {
userPolicy.Statement.Sid = 1234;
check(userPolicy, failRes(user), user);
});
it('resource policy should fail if Sid is not a string', () => {
resourcePolicy.Statement[0].Sid = 1234;
check(resourcePolicy, failRes(resource), resource);
}); });
}); });
describe('Policies validation - Statement::Effect_block', () => { describe('Policies validation - Statement::Effect_block', () => {
it('user policy should succeed for Allow', () => { it('should succeed for Allow', () => {
check(userPolicy, successRes, user); check(policy, successRes);
}); });
it('resource policy should succeed for Allow', () => { it('should succeed for Deny', () => {
check(resourcePolicy, successRes, resource); policy.Statement.Effect = 'Deny';
check(policy, successRes);
}); });
it('user policy should succeed for Deny', () => { it('should fail for strings other than Allow/Deny', () => {
userPolicy.Statement.Effect = 'Deny'; policy.Statement.Effect = 'Reject';
check(userPolicy, successRes, user); check(policy, failRes());
}); });
it('resource policy should succeed for Deny', () => { it('should fail if Effect is not a string', () => {
resourcePolicy.Statement[0].Effect = 'Deny'; policy.Statement.Effect = 1;
check(resourcePolicy, successRes, resource); check(policy, failRes());
});
it('user policy should fail for strings other than Allow/Deny', () => {
userPolicy.Statement.Effect = 'Reject';
check(userPolicy, failRes(user), user);
});
it('resource policy should fail for strings other than Allow/Deny', () => {
resourcePolicy.Statement[0].Effect = 'Reject';
check(resourcePolicy, failRes(resource), resource);
});
it('user policy should fail if Effect is not a string', () => {
userPolicy.Statement.Effect = 1;
check(userPolicy, failRes(user), user);
});
it('resource policy should fail if Effect is not a string', () => {
resourcePolicy.Statement[0].Effect = 1;
check(resourcePolicy, failRes(resource), resource);
}); });
}); });
const actionTests = [ describe('Policies validation - Statement::Action_block/' +
{
name: 'should succeed for foo:bar',
value: 'foo:bar',
expected: successRes,
},
{
name: 'should succeed for foo:*',
value: 'foo:*',
expected: successRes,
},
{
name: 'should succeed for *',
value: '*',
expected: successRes,
},
{
name: 'should fail for **',
value: '**',
expected: 'fail',
errMessage: errDict.pattern.Action,
},
{
name: 'should fail for foobar',
value: 'foobar',
expected: 'fail',
errMessage: errDict.pattern.Action,
},
];
describe('User policies validation - Statement::Action_block/' +
'Statement::NotAction_block', () => { 'Statement::NotAction_block', () => {
beforeEach(() => { beforeEach(() => {
userPolicy.Statement.Action = undefined; policy.Statement.Action = undefined;
userPolicy.Statement.NotAction = undefined; policy.Statement.NotAction = undefined;
}); });
actionTests.forEach(test => { it('should succeed for foo:bar', () => {
it(`${test.name}`, () => { policy.Statement.Action = 'foo:bar';
userPolicy.Statement.Action = test.value; check(policy, successRes);
if (test.expected === 'fail') {
check(userPolicy, failRes(user, test.errMessage), user);
} else {
check(userPolicy, test.expected, user);
}
userPolicy.Statement.Action = undefined; policy.Statement.Action = undefined;
userPolicy.Statement.NotAction = test.value; policy.Statement.NotAction = 'foo:bar';
if (test.expected === 'fail') { check(policy, successRes);
check(userPolicy, failRes(user, test.errMessage), user);
} else {
check(userPolicy, test.expected, user);
}
}); });
it('should succeed for foo:*', () => {
policy.Statement.Action = 'foo:*';
check(policy, successRes);
policy.Statement.Action = undefined;
policy.Statement.NotAction = 'foo:*';
check(policy, successRes);
});
it('should succeed for *', () => {
policy.Statement.Action = '*';
check(policy, successRes);
policy.Statement.Action = undefined;
policy.Statement.NotAction = '*';
check(policy, successRes);
});
it('should fail for **', () => {
policy.Statement.Action = '**';
check(policy, failRes(errDict.pattern.Action));
policy.Statement.Action = undefined;
policy.Statement.NotAction = '**';
check(policy, failRes(errDict.pattern.Action));
});
it('should fail for foobar', () => {
policy.Statement.Action = 'foobar';
check(policy, failRes(errDict.pattern.Action));
policy.Statement.Action = undefined;
policy.Statement.NotAction = 'foobar';
check(policy, failRes(errDict.pattern.Action));
}); });
}); });
describe('Resource policies validation - Statement::Action_block', () => { describe('Policies validation - Statement::Resource_block' +
actionTests.forEach(test => {
it(`${test.name}`, () => {
resourcePolicy.Statement[0].Action = test.value;
if (test.expected === 'fail') {
check(resourcePolicy, failRes(resource, test.errMessage),
resource);
} else {
check(resourcePolicy, test.expected, resource);
}
});
});
});
const resourceTests = [
{
name: 'should succeed for arn:aws::s3:::*',
value: 'arn:aws:s3:::*',
expected: successRes,
},
{
name: 'should succeed for arn:aws:s3:::test/home/${aws:username}',
value: 'arn:aws:s3:::test/home/${aws:username}',
expected: successRes,
},
{
name: 'should succeed for arn:aws:ec2:us-west-1:1234567890:vol/*',
value: 'arn:aws:ec2:us-west-1:1234567890:vol/*',
expected: successRes,
},
{
name: 'should succeed for *',
value: '*',
expected: successRes,
},
{
name: 'should fail for arn:aws:ec2:us-west-1:vol/* - missing region',
value: 'arn:aws:ec2:us-west-1:vol/*',
expected: 'fail',
errMessage: errDict.pattern.Resource,
},
{
name: 'should fail for arn:aws:ec2:us-west-1:123456789:v/${} - ${}',
value: 'arn:aws:ec2:us-west-1:123456789:v/${}',
expected: 'fail',
errMessage: errDict.pattern.Resource,
},
{
name: 'should fail for ec2:us-west-1:qwerty:vol/* - missing arn:aws:',
value: 'ec2:us-west-1:123456789012:vol/*',
expected: 'fail',
errMessage: errDict.pattern.Resource,
},
];
describe('User policies validation - Statement::Resource_block' +
'Statement::NotResource_block', () => { 'Statement::NotResource_block', () => {
beforeEach(() => { beforeEach(() => {
userPolicy.Statement.Resource = undefined; policy.Statement.Resource = undefined;
userPolicy.Statement.NotResource = undefined; policy.Statement.NotResource = undefined;
}); });
resourceTests.forEach(test => { it('should succeed for arn:aws:s3:::*', () => {
it(`${test.name}`, () => { policy.Statement.Resource = 'arn:aws:s3:::*';
userPolicy.Statement.Resource = test.value; check(policy, successRes);
if (test.expected === 'fail') {
check(userPolicy, failRes(user, test.errMessage), user);
} else {
check(userPolicy, test.expected, user);
}
userPolicy.Statement.Resource = undefined; policy.Statement.Resource = undefined;
userPolicy.Statement.NotResource = test.value; policy.Statement.NotResource = 'arn:aws:s3:::*';
if (test.expected === 'fail') { check(policy, successRes);
check(userPolicy, failRes(user, test.errMessage), user);
} else {
check(userPolicy, test.expected, user);
}
}); });
it('should succeed for arn:aws:s3:::test/home/${aws:username}', () => {
policy.Statement.Resource = 'arn:aws:s3:::test/home/${aws:username}';
check(policy, successRes);
policy.Statement.Resource = undefined;
policy.Statement.NotResource = 'arn:aws:s3:::test/home/${aws:username}';
check(policy, successRes);
});
it('should succeed for arn:aws:ec2:us-west-1:1234567890:vol/*', () => {
policy.Statement.Resource = 'arn:aws:ec2:us-west-1:1234567890:vol/*';
check(policy, successRes);
policy.Statement.Resource = undefined;
policy.Statement.NotResource = 'arn:aws:ec2:us-west-1:1234567890:vol/*';
check(policy, successRes);
});
it('should succeed for *', () => {
policy.Statement.Resource = '*';
check(policy, successRes);
policy.Statement.Resource = undefined;
policy.Statement.NotResource = '*';
check(policy, successRes);
});
it('should fail for arn:aws:ec2:us-west-1:vol/* - missing region', () => {
policy.Statement.Resource = 'arn:aws:ec2:1234567890:vol/*';
check(policy, failRes(errDict.pattern.Resource));
policy.Statement.Resource = undefined;
policy.Statement.NotResource = 'arn:aws:ec2:1234567890:vol/*';
check(policy, failRes(errDict.pattern.Resource));
});
it('should fail for arn:aws:ec2:us-west-1:123456789:v/${} - ${}', () => {
policy.Statement.Resource = 'arn:aws:ec2:us-west-1:123456789:v/${}';
check(policy, failRes(errDict.pattern.Resource));
policy.Statement.Resource = undefined;
policy.Statement.NotResource = 'arn:aws:ec2:us-west-1:123456789:v/${}';
check(policy, failRes(errDict.pattern.Resource));
});
it('should fail for ec2:us-west-1:qwerty:vol/* - missing arn:aws:', () => {
policy.Statement.Resource = 'ec2:us-west-1:123456789012:vol/*';
check(policy, failRes(errDict.pattern.Resource));
policy.Statement.Resource = undefined;
policy.Statement.NotResource = 'ec2:us-west-1:123456789012:vol/*';
check(policy, failRes(errDict.pattern.Resource));
}); });
it('should fail for empty list of resources', () => { it('should fail for empty list of resources', () => {
userPolicy.Statement.Resource = []; policy.Statement.Resource = [];
check(userPolicy, failRes(user, errDict.minItems.Resource), user); check(policy, failRes(errDict.minItems.Resource));
});
});
describe('Resource policies validation - Statement::Resource_block', () => {
resourceTests.forEach(test => {
it(`${test.name}`, () => {
resourcePolicy.Statement[0].Resource = test.value;
if (test.expected === 'fail') {
check(resourcePolicy, failRes(resource, test.errMessage),
resource);
} else {
check(resourcePolicy, test.expected, resource);
}
});
});
it('should fail for empty list of resources', () => {
resourcePolicy.Statement[0].Resource = [];
check(resourcePolicy, failRes(resource, errDict.minItems.Resource),
resource);
}); });
}); });
describe('Policies validation - Statement::Condition_block', () => { describe('Policies validation - Statement::Condition_block', () => {
it('user policy should succeed for single Condition', () => { it('should succeed for single Condition', () => {
check(userPolicy, successRes, user); check(policy, successRes);
}); });
it('resource policy should succeed for single Condition', () => { it('should succeed for multiple Conditions', () => {
check(resourcePolicy, successRes, resource); policy.Statement.Condition = {
});
[
{
name: 'should succeed for multiple Conditions',
value: {
StringNotLike: { 's3:prefix': ['Development/*'] }, StringNotLike: { 's3:prefix': ['Development/*'] },
Null: { 's3:prefix': false }, Null: { 's3:prefix': false },
}, };
expected: successRes, check(policy, successRes);
},
{
name: 'should fail when Condition is not an Object',
value: 'NumericLessThanEquals',
expected: 'fail',
},
{
name: 'should fail for an invalid Condition',
value: {
SomethingLike: { 's3:prefix': ['Development/*'] },
},
expected: 'fail',
},
{
name: 'should fail when one of the multiple conditions is invalid',
value: {
Null: { 's3:prefix': false },
SomethingLike: { 's3:prefix': ['Development/*'] },
},
expected: 'fail',
},
{
name: 'should fail when invalid property is assigned',
value: {
SomethingLike: { 's3:prefix': ['Development/*'] },
},
expected: 'fail',
},
].forEach(test => {
it(`user policy ${test.name}`, () => {
userPolicy.Statement.Condition = test.value;
if (test.expected === 'fail') {
check(userPolicy, failRes(user), user);
} else {
check(userPolicy, test.expected, user);
}
}); });
it(`resource policy ${test.name}`, () => { it('should fail when Condition is not an Object', () => {
resourcePolicy.Statement[0].Condition = test.value; policy.Statement.Condition = 'NumericLessThanEquals';
if (test.expected === 'fail') { check(policy, failRes());
check(resourcePolicy, failRes(resource), resource); });
} else {
check(resourcePolicy, test.expected, resource); it('should fail for an invalid Condition', () => {
} policy.Statement.Condition = {
}); SomethingLike: { 's3:prefix': ['Development/*'] },
};
check(policy, failRes());
});
it('should fail when one of the multiple conditions is invalid', () => {
policy.Statement.Condition = {
Null: { 's3:prefix': false },
SomethingLike: { 's3:prefix': ['Development/*'] },
};
check(policy, failRes());
});
it('should fail when invalid property is assigned', () => {
policy.Condition = {
SomethingLike: { 's3:prefix': ['Development/*'] },
};
check(policy, failRes());
}); });
}); });

View File

@ -7,7 +7,6 @@ const {
_checkEtagNoneMatch, _checkEtagNoneMatch,
_checkModifiedSince, _checkModifiedSince,
_checkUnmodifiedSince, _checkUnmodifiedSince,
checkDateModifiedHeaders,
validateConditionalHeaders, validateConditionalHeaders,
} = require('../../../lib/s3middleware/validateConditionalHeaders'); } = require('../../../lib/s3middleware/validateConditionalHeaders');
@ -173,59 +172,6 @@ describe('validateConditionalHeaders util function ::', () => {
}); });
}); });
describe('checkDateModifiedHeaders util function: ', () => {
const expectedSuccess = {
present: true,
error: null,
};
const expectedAbsense = {
present: false,
error: null,
};
it('should return NotModified error for \'if-modified-since\' header',
() => {
const header = {};
header['if-modified-since'] = afterLastModified;
const { modifiedSinceRes, unmodifiedSinceRes } =
checkDateModifiedHeaders(header, lastModified);
assert.deepStrictEqual(modifiedSinceRes.error, errors.NotModified);
assert.deepStrictEqual(unmodifiedSinceRes, expectedAbsense);
});
it('should return PreconditionFailed error for \'if-unmodified-since\' ' +
'header', () => {
const header = {};
header['if-unmodified-since'] = beforeLastModified;
const { modifiedSinceRes, unmodifiedSinceRes } =
checkDateModifiedHeaders(header, lastModified);
assert.deepStrictEqual(unmodifiedSinceRes.error,
errors.PreconditionFailed);
assert.deepStrictEqual(modifiedSinceRes, expectedAbsense);
});
it('should succeed if \'if-modified-since\' header value is earlier ' +
'than last modified', () => {
const header = {};
header['if-modified-since'] = beforeLastModified;
const { modifiedSinceRes, unmodifiedSinceRes } =
checkDateModifiedHeaders(header, lastModified);
assert.deepStrictEqual(modifiedSinceRes, expectedSuccess);
assert.deepStrictEqual(unmodifiedSinceRes, expectedAbsense);
});
it('should succeed if \'if-unmodified-since\' header value is later ' +
'than last modified', () => {
const header = {};
header['if-unmodified-since'] = afterLastModified;
const { modifiedSinceRes, unmodifiedSinceRes } =
checkDateModifiedHeaders(header, lastModified);
assert.deepStrictEqual(unmodifiedSinceRes, expectedSuccess);
assert.deepStrictEqual(modifiedSinceRes, expectedAbsense);
});
});
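The removed block above tested a checkDateModifiedHeaders helper that evaluates the 'if-modified-since' and 'if-unmodified-since' request headers against an object's last-modified date. As a rough, hypothetical sketch of the behaviour those assertions describe (assuming an errors object exposing NotModified and PreconditionFailed, as the test file uses; this is not Arsenal's actual implementation):
// Hypothetical sketch, inferred from the deleted assertions above.
function checkDateModifiedHeadersSketch(headers, lastModified) {
    const modifiedSinceRes = { present: false, error: null };
    const unmodifiedSinceRes = { present: false, error: null };
    const lastModifiedTime = new Date(lastModified).getTime();
    if (headers['if-modified-since']) {
        modifiedSinceRes.present = true;
        const since = new Date(headers['if-modified-since']).getTime();
        // object unchanged since the supplied date => 304 Not Modified
        modifiedSinceRes.error =
            lastModifiedTime <= since ? errors.NotModified : null;
    }
    if (headers['if-unmodified-since']) {
        unmodifiedSinceRes.present = true;
        const since = new Date(headers['if-unmodified-since']).getTime();
        // object changed after the supplied date => 412 Precondition Failed
        unmodifiedSinceRes.error =
            lastModifiedTime > since ? errors.PreconditionFailed : null;
    }
    return { modifiedSinceRes, unmodifiedSinceRes };
}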
describe('_checkEtagMatch function :', () => {
const expectedSuccess = {
present: true,

View File

@ -1,36 +0,0 @@
const assert = require('assert');
const routesUtils = require('../../../../lib/s3routes/routesUtils.js');
const bannedStr = 'banned';
const prefixBlacklist = [];
// byte size of 915
const keyutf8 = '%EA%9D%8B崰㈌㒈保轖䳷䀰⺩ቆ楪秲ⴝ㿅鼎퇬枅࿷염곞召㸾⌙ꪊᆐ庍뉆䌗幐鸆䛃➟녩' +
'ˍ뙪臅⠙≼绒벊냂詴 끴鹲萯⇂㭢䈊퉉楝舳㷖족痴䧫㾵᏷ำꎆ꼵껪멷㄀誕㳓腜쒃컹㑻鳃삚舿췈孨੦⮀NJ곓⵪꺼꜈' +
'嗼뫘悕錸瑺⁤륒㜓垻ㆩꝿ詀펉ᆙ舑䜾힑藪碙ꀎꂰ췊Ᏻ 㘺幽醛잯ද汧Ꟑꛒⶨ쪸숞헹㭔ꡔᘼ뺓ᡆ᡾ᑟ䅅퀭耓弧⢠⇙' +
'폪ް蛧⃪Ἔ돫ꕢ븥ヲ캂䝄쟐颺ᓾ둾Ұ껗礞ᾰ瘹蒯硳풛瞋襎奺熝妒컚쉴⿂㽝㝳駵鈚䄖戭䌸᫲ᇁ䙪鸮ᐴ稫ⶭ뀟ھ⦿' +
'䴳稉ꉕ捈袿놾띐✯伤䃫⸧ꠏ瘌틳藔ˋ㫣敀䔩㭘식↴⧵佶痊牌ꪌ搒꾛æᤈべ쉴挜敗羥誜嘳ֶꫜ걵ࣀ묟ኋ拃秷膤䨸菥' +
'䟆곘縧멀煣卲챸⧃⏶혣ਔ뙞밺㊑ک씌촃Ȅ頰ᖅ懚ホῐ꠷㯢먈㝹୥밷㮇䘖桲阥黾噘烻ᓧ鈠ᴥ徰穆ꘛ蹕綻表鯍裊' +
'鮕漨踒ꠍ픸Ä☶莒浏钸목탬툖氭ˠٸ൪㤌ᶟ訧ᜒೳ揪Ⴛ摖㸣᳑⹞걀ꢢ䏹ῖ"';
describe('routesUtils.isValidObjectKey', () => {
it('should return isValid false if object key name starts with a ' +
'blacklisted prefix', () => {
const result = routesUtils.isValidObjectKey('bannedkey', [bannedStr]);
// return { isValid: false, invalidPrefix };
assert.strictEqual(result.isValid, false);
assert.strictEqual(result.invalidPrefix, bannedStr);
});
it('should return isValid false if object key name exceeds length of 915',
() => {
const key = 'a'.repeat(916);
const result = routesUtils.isValidObjectKey(key, prefixBlacklist);
assert.strictEqual(result.isValid, false);
});
it('should return isValid true for a utf8 string of byte size 915', () => {
const result = routesUtils.isValidObjectKey(keyutf8, prefixBlacklist);
assert.strictEqual(result.isValid, true);
});
});
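For reference, the checks exercised by this deleted test file can be sketched as follows (a hypothetical approximation, not the actual routesUtils code; the 915-byte limit and the { isValid, invalidPrefix } result shape come from the assertions above):
// Hypothetical sketch of an object-key validator matching the tests above.
function isValidObjectKeySketch(objectKey, prefixBlacklist) {
    const invalidPrefix = prefixBlacklist.find(
        prefix => objectKey.startsWith(prefix));
    if (invalidPrefix) {
        return { isValid: false, invalidPrefix };
    }
    if (Buffer.byteLength(objectKey, 'utf8') > 915) {
        return { isValid: false };
    }
    return { isValid: true };
}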

View File

@ -1,76 +0,0 @@
'use strict'; //eslint-disable-line
const assert = require('assert');
const { markerFilterMPU } =
require('../../../../../lib/storage/metadata/in_memory/bucket_utilities');
function dupeArray(arr) {
const dupe = [];
arr.forEach(i => {
dupe.push(Object.assign({}, i));
});
return dupe;
}
describe('bucket utility methods for in_memory backend', () => {
it('should return an array of multipart uploads starting with the item ' +
'right after the specified keyMarker and uploadIdMarker', () => {
const mpus = [
{
key: 'key-1',
uploadId: '2624ca6080c841d48a2481941df868a9',
},
{
key: 'key-1',
uploadId: '4ffeca96b0c24ea9b538b8f0b60cede3',
},
{
key: 'key-1',
uploadId: '52e5b94474894990a2b94330bb3c8fa9',
},
{
key: 'key-1',
uploadId: '54e530c5d4c741898c8e161d426591cb',
},
{
key: 'key-1',
uploadId: '6cc59f9d29254e81ab6cb6332fb46314',
},
{
key: 'key-1',
uploadId: 'fe9dd10776c9476697632d0b55960a05',
},
{
key: 'key-2',
uploadId: '68e24ccb96c14beea79bf01fc130fdf5',
},
];
[
{
keyMarker: 'key-1',
uploadIdMarker: '54e530c5d4c741898c8e161d426591cb',
expected: 3,
},
{
keyMarker: 'key-2',
uploadIdMarker: '68e24ccb96c14beea79bf01fc130fdf5',
expected: 0,
},
{
keyMarker: 'key-1',
uploadIdMarker: '2624ca6080c841d48a2481941df868a9',
expected: 6,
},
].forEach(item => {
const res = markerFilterMPU(item, dupeArray(mpus));
assert.equal(res.length, item.expected);
const expected = mpus.slice(mpus.length - res.length);
assert.deepStrictEqual(res, expected);
});
});
});
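The expectations above amount to a "start after the marker pair" filter over a listing sorted by key and then uploadId. A minimal hypothetical sketch of that behaviour (not the in_memory backend's actual markerFilterMPU):
// Hypothetical marker filter: keep only uploads strictly after the
// (keyMarker, uploadIdMarker) pair, assuming the input is already sorted.
function markerFilterMPUSketch(params, mpus) {
    const { keyMarker, uploadIdMarker } = params;
    return mpus.filter(mpu =>
        mpu.key > keyMarker ||
        (mpu.key === keyMarker && mpu.uploadId > uploadIdMarker));
}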

View File

@ -1,507 +0,0 @@
const assert = require('assert');
const {
NEW_OBJ,
NEW_VER,
UPDATE_VER,
UPDATE_MST,
RESTORE,
DEL_VER,
DEL_MST,
DataCounter,
} = require('../../../../../lib/storage/metadata/mongoclient/DataCounter');
const refZeroObj = {
objects: 0,
versions: 0,
dataManaged: {
total: { curr: 0, prev: 0 },
byLocation: {},
},
stalled: 0,
};
const refSingleObj = {
objects: 2,
versions: 0,
dataManaged: {
total: { curr: 200, prev: 0 },
byLocation: {
locationOne: { curr: 200, prev: 0 },
},
},
stalled: 0,
};
const refSingleObjVer = {
objects: 1,
versions: 1,
dataManaged: {
total: { curr: 100, prev: 100 },
byLocation: {
locationOne: { curr: 100, prev: 100 },
},
},
stalled: 0,
};
const refMultiObjVer = {
objects: 1,
versions: 1,
dataManaged: {
total: { curr: 200, prev: 200 },
byLocation: {
locationOne: { curr: 100, prev: 100 },
locationTwo: { curr: 100, prev: 100 },
},
},
stalled: 0,
};
const refMultiObj = {
objects: 2,
versions: 0,
dataManaged: {
total: { curr: 400, prev: 0 },
byLocation: {
locationOne: { curr: 200, prev: 0 },
locationTwo: { curr: 200, prev: 0 },
},
},
stalled: 0,
};
const singleSite = size => ({
'content-length': size,
'dataStoreName': 'locationOne',
'replicationInfo': {
backends: [],
},
});
const multiSite = (size, isComplete) => ({
'content-length': size,
'dataStoreName': 'locationOne',
'replicationInfo': {
backends: [{
site: 'locationTwo',
status: isComplete ? 'COMPLETED' : 'PENDING',
}],
},
});
const transientSite = (size, status, backends) => ({
'content-length': size,
'dataStoreName': 'locationOne',
'replicationInfo': { status, backends },
});
const locationConstraints = {
locationOne: { isTransient: true },
locationTwo: { isTransient: false },
};
const dataCounter = new DataCounter();
describe('DataCounter Class', () => {
it('should create a zero object', () => {
dataCounter.set(refZeroObj);
assert.deepStrictEqual(dataCounter.results(), refZeroObj);
});
it('should skip dataCounter methods if initial values are not set', () => {
const testCounter = new DataCounter();
testCounter.addObject(singleSite(100), null, NEW_OBJ);
assert.deepStrictEqual(testCounter.results(), refZeroObj);
});
});
describe('DataCounter::updateTransientList', () => {
afterEach(() => dataCounter.updateTransientList({}));
it('should set transient list', () => {
assert.deepStrictEqual(dataCounter.transientList, {});
dataCounter.updateTransientList(locationConstraints);
const expectedRes = { locationOne: true, locationTwo: false };
assert.deepStrictEqual(dataCounter.transientList, expectedRes);
});
});
describe('DataCounter::addObject', () => {
const tests = [
{
it: 'should correctly update DataCounter, new object one site',
init: refZeroObj,
input: [singleSite(100), null, NEW_OBJ],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 100, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, new object multi site',
init: refZeroObj,
input: [multiSite(100, true), null, NEW_OBJ],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 200, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, overwrite single site',
init: refSingleObj,
input: [singleSite(100), singleSite(50), NEW_OBJ],
expectedRes: {
objects: 2, versions: 0,
dataManaged: {
total: { curr: 250, prev: 0 },
byLocation: {
locationOne: { curr: 250, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, overwrite multi site',
init: refMultiObj,
input: [multiSite(100, true), multiSite(50, true), NEW_OBJ],
expectedRes: {
objects: 2, versions: 0,
dataManaged: {
total: { curr: 500, prev: 0 },
byLocation: {
locationOne: { curr: 250, prev: 0 },
locationTwo: { curr: 250, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, new version single site',
init: refSingleObj,
input: [singleSite(100), singleSite(50), NEW_VER],
expectedRes: {
objects: 2, versions: 1,
dataManaged: {
total: { curr: 250, prev: 50 },
byLocation: {
locationOne: { curr: 250, prev: 50 },
},
},
},
},
{
it: 'should correctly update DataCounter, new version multi site',
init: refMultiObj,
input: [multiSite(100, true), multiSite(50, true), NEW_VER],
expectedRes: {
objects: 2, versions: 1,
dataManaged: {
total: { curr: 500, prev: 100 },
byLocation: {
locationOne: { curr: 250, prev: 50 },
locationTwo: { curr: 250, prev: 50 },
},
},
},
},
{
it: 'should correctly ignore pending status, multi site',
init: refZeroObj,
input: [multiSite(100, false), null, NEW_OBJ],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 100, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'replication completion update in master object',
init: refSingleObj,
input: [multiSite(100, true), multiSite(100, false), UPDATE_MST],
expectedRes: {
objects: 2, versions: 0,
dataManaged: {
total: { curr: 300, prev: 0 },
byLocation: {
locationOne: { curr: 200, prev: 0 },
locationTwo: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'replication completion update in versioned object',
init: refSingleObjVer,
input: [multiSite(100, true), multiSite(100, false), UPDATE_VER],
expectedRes: {
objects: 1, versions: 1,
dataManaged: {
total: { curr: 100, prev: 200 },
byLocation: {
locationOne: { curr: 100, prev: 100 },
locationTwo: { curr: 0, prev: 100 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'restoring versioned object as master',
init: refMultiObjVer,
input: [multiSite(100, true), multiSite(100, true), RESTORE],
expectedRes: {
objects: 2, versions: 0,
dataManaged: {
total: { curr: 400, prev: 0 },
byLocation: {
locationOne: { curr: 200, prev: 0 },
locationTwo: { curr: 200, prev: 0 },
},
},
},
},
];
tests.forEach(test => it(test.it, () => {
const { expectedRes, input, init } = test;
dataCounter.set(init);
dataCounter.addObject(...input);
const testResults = dataCounter.results();
Object.keys(expectedRes).forEach(key => {
if (typeof expectedRes[key] === 'object') {
assert.deepStrictEqual(testResults[key], expectedRes[key]);
} else {
assert.strictEqual(testResults[key], expectedRes[key]);
}
});
}));
});
describe('DataCounter, update with transient location', () => {
before(() => dataCounter.updateTransientList(locationConstraints));
after(() => dataCounter.updateTransientList({}));
const pCurrMD = transientSite(100, 'PENDING', [
{ site: 'site1', status: 'PENDING' },
{ site: 'site2', status: 'COMPLETED' },
]);
const cCurrMD = transientSite(100, 'COMPLETED', [
{ site: 'site1', status: 'COMPLETED' },
{ site: 'site2', status: 'COMPLETED' },
]);
const prevMD = transientSite(100, 'PENDING', [
{ site: 'site1', status: 'PENDING' },
{ site: 'site2', status: 'PENDING' },
]);
const transientTest = [
{
it: 'should correctly update DataCounter, ' +
'version object, replication status = PENDING',
init: refSingleObjVer,
input: [pCurrMD, prevMD, UPDATE_VER],
expectedRes: {
objects: 1, versions: 1,
dataManaged: {
total: { curr: 100, prev: 200 },
byLocation: {
locationOne: { curr: 100, prev: 100 },
site2: { curr: 0, prev: 100 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'version object, replication status = COMPLETED',
init: refSingleObjVer,
input: [cCurrMD, prevMD, UPDATE_VER],
expectedRes: {
objects: 1, versions: 1,
dataManaged: {
total: { curr: 100, prev: 200 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
site1: { curr: 0, prev: 100 },
site2: { curr: 0, prev: 100 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'master object, replication status = PENDING',
init: refSingleObjVer,
input: [pCurrMD, prevMD, UPDATE_MST],
expectedRes: {
objects: 1, versions: 1,
dataManaged: {
total: { curr: 200, prev: 100 },
byLocation: {
locationOne: { curr: 100, prev: 100 },
site2: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'master object, replication status = COMPLETED',
init: refSingleObjVer,
input: [cCurrMD, prevMD, UPDATE_MST],
expectedRes: {
objects: 1, versions: 1,
dataManaged: {
total: { curr: 200, prev: 100 },
byLocation: {
locationOne: { curr: 0, prev: 100 },
site1: { curr: 100, prev: 0 },
site2: { curr: 100, prev: 0 },
},
},
},
},
];
transientTest.forEach(test => it(test.it, () => {
const { expectedRes, input, init } = test;
dataCounter.set(init);
dataCounter.addObject(...input);
const testResults = dataCounter.results();
Object.keys(expectedRes).forEach(key => {
if (typeof expectedRes[key] === 'object') {
assert.deepStrictEqual(testResults[key], expectedRes[key]);
} else {
assert.strictEqual(testResults[key], expectedRes[key]);
}
});
}));
});
describe('DataCounter::delObject', () => {
const tests = [
{
it: 'should correctly update DataCounter, ' +
'delete master object single site',
init: refMultiObj,
input: [singleSite(100), DEL_MST],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 300, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 200, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'delete master object multi site',
init: refMultiObj,
input: [multiSite(100, true), DEL_MST],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 200, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'delete versioned object single site',
init: refMultiObjVer,
input: [singleSite(100), DEL_VER],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 200, prev: 100 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 100, prev: 100 },
},
},
},
},
{
it: 'should correctly update DataCounter, ' +
'delete versioned object multi site',
init: refMultiObjVer,
input: [multiSite(100, true), DEL_VER],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 200, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 100, prev: 0 },
},
},
},
},
{
it: 'should clamp negative values to 0, master object',
init: refMultiObjVer,
input: [multiSite(300, true), DEL_MST],
expectedRes: {
objects: 0, versions: 1,
dataManaged: {
total: { curr: 0, prev: 200 },
byLocation: {
locationOne: { curr: 0, prev: 100 },
locationTwo: { curr: 0, prev: 100 },
},
},
},
},
{
it: 'should clamp negative values to 0, versioned object',
init: refMultiObjVer,
input: [multiSite(300, true), DEL_VER],
expectedRes: {
objects: 1, versions: 0,
dataManaged: {
total: { curr: 200, prev: 0 },
byLocation: {
locationOne: { curr: 100, prev: 0 },
locationTwo: { curr: 100, prev: 0 },
},
},
},
},
];
tests.forEach(test => it(test.it, () => {
const { expectedRes, input, init } = test;
dataCounter.set(init);
dataCounter.delObject(...input);
const testResults = dataCounter.results();
Object.keys(expectedRes).forEach(key => {
if (typeof expectedRes[key] === 'object') {
assert.deepStrictEqual(testResults[key], expectedRes[key]);
} else {
assert.strictEqual(testResults[key], expectedRes[key]);
}
});
}));
});
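Every case in this deleted file follows the same pattern: seed the counter with a reference state via set(), apply a single addObject/delObject event, then compare results(). A compact illustration, reusing the fixtures defined above (hypothetical, not part of the original file):
// Seed with the zero state, add one 100-byte object on locationOne, and
// check the aggregated totals, as the first addObject case above expects.
dataCounter.set(refZeroObj);
dataCounter.addObject(singleSite(100), null, NEW_OBJ);
assert.strictEqual(dataCounter.results().objects, 1);
assert.deepStrictEqual(dataCounter.results().dataManaged.total,
    { curr: 100, prev: 0 });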

View File

@ -1,367 +0,0 @@
const assert = require('assert');
const { Timestamp } = require('bson');
const ListRecordStream = require(
'../../../../../lib/storage/metadata/mongoclient/ListRecordStream');
const DummyRequestLogger = require('./utils/DummyRequestLogger');
const logger = new DummyRequestLogger();
const mongoProcessedLogEntries = {
insert: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'i',
ns: 'metadata.replicated-bucket',
o: {
_id: 'replicated-key\u000098467518084696999999RG001 19.3',
value: {
someField: 'someValue',
},
},
},
updateObject: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'u',
ns: 'metadata.replicated-bucket',
o2: {
_id: 'replicated-key\u000098467518084696999999RG001 19.3',
},
o: {
$set: {
value: {
someField: 'someUpdatedValue',
},
},
},
},
deleteObject: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'd',
ns: 'metadata.replicated-bucket',
o: {
_id: 'replicated-key\u000098467518084696999999RG001 19.3',
},
},
putBucketAttributes: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'u',
ns: 'metadata.__metastore',
o2: {
_id: 'new-bucket',
}, o: {
_id: 'new-bucket',
value: {
someField: 'someValue',
},
},
},
deleteBucket: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'd',
ns: 'metadata.__metastore',
o: {
_id: 'new-bucket',
},
},
};
const mongoIgnoredLogEntries = {
createBucket: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'c',
ns: 'metadata.$cmd',
o: {
create: 'new-bucket',
idIndex: {
v: 2,
key: { _id: 1 },
name: '_id_',
ns: 'metadata.new-bucket',
},
},
},
dropBucketDb: {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'c',
ns: 'metadata.$cmd',
o: {
drop: 'new-bucket',
},
},
};
const expectedStreamEntries = {
insert: {
db: 'replicated-bucket',
entries: [
{
key: 'replicated-key\u000098467518084696999999RG001 19.3',
type: 'put',
value: '{"someField":"someValue"}',
},
],
timestamp: new Date(42000),
},
updateObject: {
db: 'replicated-bucket',
entries: [
{
key: 'replicated-key\u000098467518084696999999RG001 19.3',
type: 'put',
value: '{"someField":"someUpdatedValue"}',
},
],
timestamp: new Date(42000),
},
deleteObject: {
db: 'replicated-bucket',
entries: [
{
key: 'replicated-key\u000098467518084696999999RG001 19.3',
type: 'delete',
},
],
timestamp: new Date(42000),
},
putBucketAttributes: {
db: '__metastore',
entries: [
{
key: 'new-bucket',
type: 'put',
value: '{"someField":"someValue"}',
},
],
timestamp: new Date(42000),
},
deleteBucket: {
db: '__metastore',
entries: [
{
key: 'new-bucket',
type: 'delete',
},
],
timestamp: new Date(42000),
},
dropBucketDb: {
h: -42,
op: 'c',
ns: 'metadata.$cmd',
o: {
drop: 'new-bucket',
},
},
};
class MongoCursorMock {
constructor(itemsToYield, errorAtPos) {
this.itemsToYield = itemsToYield;
this.pos = 0;
this.errorAtPos = errorAtPos;
}
next(cb) {
// if there are no more items, just hang there waiting for items
// that will never come (this is how a real mongo tailable cursor
// would behave)
if (this.pos === this.errorAtPos) {
return process.nextTick(() => cb(new Error('boo')));
}
if (!this.hasSentAllItems()) {
const pos = this.pos;
this.pos += 1;
return process.nextTick(() => cb(null, this.itemsToYield[pos]));
}
return undefined;
}
hasSentAllItems() {
return this.pos === this.itemsToYield.length;
}
}
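A small illustration of how the mock behaves (hypothetical, not part of the test file): each next() call yields the following queued item on the next tick, and once the queue is drained further calls never invoke their callback, mimicking a tailable cursor with no new oplog entries.
const cursorExample = new MongoCursorMock([{ h: 1 }, { h: 2 }]);
cursorExample.next((err, item) => {
    // err === null, item === { h: 1 }
    cursorExample.next((err2, item2) => {
        // item2 === { h: 2 }; a third next() would never call back
    });
});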
describe('mongoclient.ListRecordStream', () => {
const lastEndIDEntry = {
h: -43,
ts: Timestamp.fromNumber(42),
};
Object.keys(mongoProcessedLogEntries).forEach(entryType => {
it(`should transform ${entryType}`, done => {
// the first entry will be ignored by ListRecordStream because it
// matches the last end ID (-43); it is still needed to bootstrap the stream
const cursor = new MongoCursorMock([
lastEndIDEntry,
mongoProcessedLogEntries[entryType],
]);
const lrs = new ListRecordStream(cursor, logger,
lastEndIDEntry.h.toString());
let hasReceivedData = false;
lrs.on('data', entry => {
assert.strictEqual(hasReceivedData, false);
hasReceivedData = true;
assert.deepStrictEqual(entry, expectedStreamEntries[entryType]);
if (cursor.hasSentAllItems()) {
assert.strictEqual(hasReceivedData, true);
assert.deepStrictEqual(JSON.parse(lrs.getOffset()),
{ uniqID: '-42' });
done();
}
});
});
});
it('should ignore other entry types', done => {
// the first entry will be ignored by ListRecordStream because it
// matches the last end ID (-43); it is still needed to bootstrap the stream
const logEntries = [lastEndIDEntry];
Object.keys(mongoIgnoredLogEntries).forEach(entryType => {
logEntries.push(mongoIgnoredLogEntries[entryType]);
});
const cursor = new MongoCursorMock(logEntries);
const lrs = new ListRecordStream(cursor, logger,
lastEndIDEntry.h.toString());
lrs.on('data', entry => {
assert(false, `ListRecordStream did not ignore entry ${entry}`);
});
setTimeout(() => {
assert.strictEqual(cursor.hasSentAllItems(), true);
assert.deepStrictEqual(JSON.parse(lrs.getOffset()),
{ uniqID: '-42' });
done();
}, 200);
});
it('should skip entries until uniqID is encountered', done => {
const logEntries = [
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 1234, ts: Timestamp.fromNumber(45) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 5678, ts: Timestamp.fromNumber(44) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: -1234, ts: Timestamp.fromNumber(42) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 2345, ts: Timestamp.fromNumber(42) }),
];
const cursor = new MongoCursorMock(logEntries);
const lrs = new ListRecordStream(cursor, logger, '5678');
assert.strictEqual(lrs.reachedUnpublishedListing(), false);
let nbReceivedEntries = 0;
lrs.on('data', entry => {
assert.deepStrictEqual(entry, expectedStreamEntries.insert);
assert.strictEqual(lrs.reachedUnpublishedListing(), true);
++nbReceivedEntries;
if (cursor.hasSentAllItems()) {
assert.strictEqual(nbReceivedEntries, 2);
assert.deepStrictEqual(JSON.parse(lrs.getOffset()),
{ uniqID: '2345' });
assert.strictEqual(lrs.getSkipCount(), 2);
assert.strictEqual(lrs.reachedUnpublishedListing(), true);
done();
}
});
});
it('should start after latest entry if uniqID is not encountered', done => {
const logEntries = [
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 1234, ts: Timestamp.fromNumber(45) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 5678, ts: Timestamp.fromNumber(44) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: -1234, ts: Timestamp.fromNumber(42) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 2345, ts: Timestamp.fromNumber(42) }),
];
const cursor = new MongoCursorMock(logEntries);
const lrs = new ListRecordStream(cursor, logger, '4242', '-1234');
let nbReceivedEntries = 0;
lrs.on('data', entry => {
assert.deepStrictEqual(entry, expectedStreamEntries.insert);
++nbReceivedEntries;
if (cursor.hasSentAllItems()) {
assert.strictEqual(nbReceivedEntries, 1);
assert.deepStrictEqual(JSON.parse(lrs.getOffset()),
{ uniqID: '2345' });
assert.strictEqual(lrs.getSkipCount(), 3);
assert.strictEqual(lrs.reachedUnpublishedListing(), true);
done();
}
});
});
it('should consume from the first entry if there is no saved ID', done => {
const logEntries = [
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 1234, ts: Timestamp.fromNumber(42) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 5678, ts: Timestamp.fromNumber(42) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: -1234, ts: Timestamp.fromNumber(42) }),
Object.assign({}, mongoProcessedLogEntries.insert,
{ h: 2345, ts: Timestamp.fromNumber(42) }),
];
const cursor = new MongoCursorMock(logEntries);
const lrs = new ListRecordStream(cursor, logger, undefined, '-1234');
let nbReceivedEntries = 0;
lrs.on('data', entry => {
assert.deepStrictEqual(entry, expectedStreamEntries.insert);
++nbReceivedEntries;
if (cursor.hasSentAllItems()) {
assert.strictEqual(nbReceivedEntries, 4);
assert.deepStrictEqual(JSON.parse(lrs.getOffset()),
{ uniqID: '2345' });
assert.strictEqual(lrs.getSkipCount(), 0);
assert.strictEqual(lrs.reachedUnpublishedListing(), true);
done();
}
});
});
it('should emit an error event when cursor returns an error', done => {
const cursor = new MongoCursorMock([], 0);
const lrs = new ListRecordStream(cursor, logger, '4242', '-1234');
lrs.on('data', () => {
assert(false, 'did not expect data');
});
lrs.on('error', () => done());
});
it('should support bucket names with dots', done => {
const logEntry = {
h: -42,
ts: Timestamp.fromNumber(42),
op: 'i',
ns: 'metadata.some.bucket.with.dots',
o: {
_id: 'replicated-key\u000098467518084696999999RG001 19.3',
value: {
someField: 'someValue',
},
},
};
const expectedLogEntry = {
db: 'some.bucket.with.dots',
entries: [
{
key: 'replicated-key\u000098467518084696999999RG001 19.3',
type: 'put',
value: '{"someField":"someValue"}',
},
],
timestamp: new Date(42000),
};
const cursor = new MongoCursorMock([
lastEndIDEntry,
logEntry,
]);
const lrs = new ListRecordStream(cursor, logger,
lastEndIDEntry.h.toString());
lrs.on('data', entry => {
assert.deepStrictEqual(entry, expectedLogEntry);
done();
});
});
});

View File

@ -1,238 +0,0 @@
const assert = require('assert');
const MongoClientInterface = require(
'../../../../../lib/storage/metadata/mongoclient/MongoClientInterface');
const DummyMongoDB = require('./utils/DummyMongoDB');
const DummyConfigObject = require('./utils/DummyConfigObject');
const DummyRequestLogger = require('./utils/DummyRequestLogger');
const log = new DummyRequestLogger();
const mongoTestClient = new MongoClientInterface({});
mongoTestClient.db = new DummyMongoDB();
describe('MongoClientInterface, init behavior', () => {
let s3ConfigObj;
const locationConstraints = {
locationOne: { isTransient: true },
locationTwo: { isTransient: false },
};
beforeEach(() => {
s3ConfigObj = new DummyConfigObject();
});
it('should set DataCounter transientList when declaring a ' +
'new MongoClientInterface object', () => {
s3ConfigObj.setLocationConstraints(locationConstraints);
const mongoClient = new MongoClientInterface({ config: s3ConfigObj });
const expectedRes = { locationOne: true, locationTwo: false };
assert.deepStrictEqual(
mongoClient.dataCount.transientList, expectedRes);
});
it('should update DataCounter transientList if location constraints ' +
'are updated', done => {
const mongoClient = new MongoClientInterface({ config: s3ConfigObj });
assert.deepStrictEqual(mongoClient.dataCount.transientList, {});
const expectedRes = { locationOne: true, locationTwo: false };
s3ConfigObj.once('MongoClientTestDone', () => {
assert.deepStrictEqual(
mongoClient.dataCount.transientList, expectedRes);
return done();
});
s3ConfigObj.setLocationConstraints(locationConstraints);
});
});
describe('MongoClientInterface::_handleResults', () => {
it('should return zero-result', () => {
const testInput = {
masterCount: 0, masterData: {},
nullCount: 0, nullData: {},
versionCount: 0, versionData: {},
};
const testResults = mongoTestClient._handleResults(testInput, true);
const expectedRes = {
versions: 0, objects: 0,
dataManaged: {
total: { curr: 0, prev: 0 },
locations: {},
},
};
assert.deepStrictEqual(testResults, expectedRes);
});
it('should return correct value if isVer is false', () => {
const testInput = {
masterCount: 2, masterData: { test1: 10, test2: 10 },
nullCount: 2, nullData: { test1: 10, test2: 10 },
versionCount: 2, versionData: { test1: 20, test2: 20 },
};
const testResults = mongoTestClient._handleResults(testInput, false);
const expectedRes = {
versions: 0, objects: 4,
dataManaged: {
total: { curr: 40, prev: 0 },
locations: {
test1: { curr: 20, prev: 0 },
test2: { curr: 20, prev: 0 },
},
},
};
assert.deepStrictEqual(testResults, expectedRes);
});
it('should return correct value if isVer is true', () => {
const testInput = {
masterCount: 2, masterData: { test1: 10, test2: 10 },
nullCount: 2, nullData: { test1: 10, test2: 10 },
versionCount: 4, versionData: { test1: 20, test2: 20 },
};
const testResults = mongoTestClient._handleResults(testInput, true);
const expectedRes = {
versions: 2, objects: 4,
dataManaged: {
total: { curr: 40, prev: 20 },
locations: {
test1: { curr: 20, prev: 10 },
test2: { curr: 20, prev: 10 },
},
},
};
assert.deepStrictEqual(testResults, expectedRes);
});
});
describe('MongoClientInterface::_handleMongo', () => {
beforeEach(() => mongoTestClient.db.reset());
it('should return error if mongo aggregate fails', done => {
const retValues = [new Error('testError')];
mongoTestClient.db.setReturnValues(retValues);
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, false, log, err => {
assert(err, 'Expected error, but got success');
return done();
});
});
it('should return empty object if mongo aggregate has no results', done => {
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, false, log,
(err, res) => {
assert.ifError(err, `Expected success, but got error ${err}`);
assert.deepStrictEqual(res, {});
return done();
});
});
it('should return empty object if mongo aggregate has missing results',
done => {
const retValues = [[{
count: undefined,
data: undefined,
repData: undefined,
}]];
mongoTestClient.db.setReturnValues(retValues);
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, false, log,
(err, res) => {
assert.ifError(err, `Expected success, but got error ${err}`);
assert.deepStrictEqual(res, {});
return done();
});
});
const testRetValues = [[{
count: [{ _id: null, count: 100 }],
data: [
{ _id: 'locationone', bytes: 1000 },
{ _id: 'locationtwo', bytes: 1000 },
],
repData: [
{ _id: 'awsbackend', bytes: 500 },
{ _id: 'azurebackend', bytes: 500 },
{ _id: 'gcpbackend', bytes: 500 },
],
compData: [
{ _id: 'locationone', bytes: 500 },
{ _id: 'locationtwo', bytes: 500 },
],
}]];
it('should return correct results, transient false', done => {
mongoTestClient.db.setReturnValues(testRetValues);
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, false, log,
(err, res) => {
assert.ifError(err, `Expected success, but got error ${err}`);
assert.deepStrictEqual(res, {
count: 100,
data: {
locationone: 1000,
locationtwo: 1000,
awsbackend: 500,
azurebackend: 500,
gcpbackend: 500,
},
});
return done();
});
});
it('should return correct results, transient true', done => {
mongoTestClient.db.setReturnValues(testRetValues);
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, true, log,
(err, res) => {
assert.ifError(err, `Expected success, but got error ${err}`);
assert.deepStrictEqual(res, {
count: 100,
data: {
locationone: 500,
locationtwo: 500,
awsbackend: 500,
azurebackend: 500,
gcpbackend: 500,
},
});
return done();
});
});
const testRetValuesNeg = [[{
count: [{ _id: null, count: 100 }],
data: [
{ _id: 'locationone', bytes: 100 },
{ _id: 'locationtwo', bytes: 100 },
],
repData: [
{ _id: 'awsbackend', bytes: 500 },
{ _id: 'azurebackend', bytes: 500 },
{ _id: 'gcpbackend', bytes: 500 },
],
compData: [
{ _id: 'locationone', bytes: 500 },
{ _id: 'locationtwo', bytes: 500 },
],
}]];
it('should clamp negative values to 0', done => {
mongoTestClient.db.setReturnValues(testRetValuesNeg);
const testCollection = mongoTestClient.db.collection('test');
mongoTestClient._handleMongo(testCollection, {}, true, log,
(err, res) => {
assert.ifError(err, `Expected success, but got error ${err}`);
assert.deepStrictEqual(res, {
count: 100,
data: {
locationone: 0,
locationtwo: 0,
awsbackend: 500,
azurebackend: 500,
gcpbackend: 500,
},
});
return done();
});
});
});

View File

@ -1,16 +0,0 @@
const { EventEmitter } = require('events');
class DummyConfigObject extends EventEmitter {
constructor() {
super();
this.locationConstraints = null;
this.isTest = true;
}
setLocationConstraints(locationConstraints) {
this.locationConstraints = locationConstraints;
this.emit('location-constraints-update');
}
}
module.exports = DummyConfigObject;
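A short usage illustration (hypothetical): listeners registered on the dummy config are notified whenever the location constraints are replaced.
const dummyConfig = new DummyConfigObject();
dummyConfig.on('location-constraints-update',
    () => console.log(dummyConfig.locationConstraints));
dummyConfig.setLocationConstraints({ locationOne: { isTransient: true } });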

View File

@ -1,103 +0,0 @@
const testError = new Error('test error');
class DummyCollection {
constructor(name, isFail) {
this.s = {
name,
};
this.fail = isFail;
this.retQueue = [];
}
setReturnValues(retArray) {
this.retQueue.push(...retArray);
}
aggregate() {
return {
toArray: cb => {
if (this.retQueue.length <= 0) {
return cb(null, []);
}
const retVal = this.retQueue[0];
this.retQueue = this.retQueue.slice(1);
if (retVal instanceof Error) {
return cb(retVal);
}
return cb(null, retVal);
},
};
}
bulkWrite(cmds, opt, cb) {
process.stdout.write('mock mongodb.bulkWrite call\n');
if (this.fail) {
return cb(testError);
}
return cb();
}
update(target, doc, opt, cb) {
process.stdout.write('mock mongodb.update call\n');
if (this.fail) {
return cb(testError);
}
return cb();
}
find() {
return {
toArray: cb => {
if (this.retQueue.length <= 0) {
return cb(null, []);
}
const retVal = this.retQueue[0];
this.retQueue = this.retQueue.slice(1);
if (retVal instanceof Error) {
return cb(retVal);
}
return cb(null, retVal);
},
};
}
findOne(query, opt, cb) {
if (typeof opt === 'function' && cb === undefined) {
// eslint-disable-next-line no-param-reassign
cb = opt;
}
if (this.retQueue.length <= 0) {
return cb(null);
}
const retVal = this.retQueue[0];
this.retQueue = this.retQueue.slice(1);
if (retVal instanceof Error) {
return cb(retVal);
}
return cb(null, retVal);
}
}
class DummyMongoDB {
constructor() {
this.fail = false;
this.returnQueue = [];
}
reset() {
this.fail = false;
this.returnQueue = [];
}
setReturnValues(retValues) {
this.returnQueue.push(...retValues);
}
collection(name) {
const c = new DummyCollection(name, this.fail);
c.setReturnValues(this.returnQueue);
return c;
}
}
module.exports = DummyMongoDB;
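A brief usage illustration (hypothetical): values queued with setReturnValues() are handed out, one per call, by the aggregate()/find()/findOne() helpers of collections created afterwards.
const dummyDb = new DummyMongoDB();
dummyDb.reset();
dummyDb.setReturnValues([[{ count: [], data: [], repData: [] }]]);
dummyDb.collection('test').aggregate().toArray((err, res) => {
    // err === null, res is the first queued value
});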

View File

@ -1,58 +0,0 @@
class DummyRequestLogger {
constructor() {
this.ops = [];
this.counts = {
trace: 0,
debug: 0,
info: 0,
warn: 0,
error: 0,
fatal: 0,
};
this.defaultFields = {};
}
trace(msg) {
this.ops.push(['trace', [msg]]);
this.counts.trace += 1;
}
debug(msg) {
this.ops.push(['debug', [msg]]);
this.counts.debug += 1;
}
info(msg) {
this.ops.push(['info', [msg]]);
this.counts.info += 1;
}
warn(msg) {
this.ops.push(['warn', [msg]]);
this.counts.warn += 1;
}
error(msg) {
this.ops.push(['error', [msg]]);
this.counts.error += 1;
}
fatal(msg) {
this.ops.push(['fatal', [msg]]);
this.counts.fatal += 1;
}
getSerializedUids() { // eslint-disable-line class-methods-use-this
return 'dummy:Serialized:Uids';
}
addDefaultFields(fields) {
Object.assign(this.defaultFields, fields);
}
end() {
return this;
}
}
module.exports = DummyRequestLogger;

View File

@ -1,24 +0,0 @@
const basicMD = {
'content-length': 0,
'key': '',
'versionId': '',
'replicationInfo': {
backends: [], // site, status
},
'dataStoreName': 'mongotest',
};
function generateMD(objKey, size, versionId, repBackends) {
const retMD = JSON.parse(JSON.stringify(basicMD));
retMD.key = objKey;
retMD['content-length'] = size;
retMD.versionId = versionId;
if (repBackends && Array.isArray(repBackends)) {
retMD.replicationInfo.backends.push(...repBackends);
}
return retMD;
}
module.exports = {
generateMD,
};
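A quick illustration of the helper above, with hypothetical values:
const md = generateMD('test-key', 1024, '0123456789abcdef', [
    { site: 'awsbackend', status: 'PENDING' },
]);
// md.key === 'test-key', md['content-length'] === 1024,
// md.versionId === '0123456789abcdef',
// md.replicationInfo.backends contains the single awsbackend entry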

View File

@ -28,7 +28,9 @@ describe('test generating versionIds', () => {
// nodejs 10 no longer returns error for non-hex string versionIds
it.skip('should return error decoding non-hex string versionIds', () => {
assert(VID.decode('foo') instanceof Error);
const encoded = vids.map(vid => VID.encode(vid));
const decoded = encoded.map(vid => VID.decode(`${vid}foo`));
decoded.forEach(result => assert(result instanceof Error));
});
it('should encode and decode versionIds', () => {

View File

@ -1,185 +0,0 @@
'use strict'; // eslint-disable-line strict
class DummyProxyResponse {
/**
* Create a new instance of this dummy class
*
* This dummy class implements the minimum feature set
* of the class http.OutgoingMessage suitable for the
* arsenal.storage.metadata.proxy.BucketdRoutes test
* without using an actual http server.
*
* @param {function} doneCB - function called once the response is
* ready to be consumed: (err, response, body)
*/
constructor(doneCB) {
this.headers = {};
this.body = null;
this.endCalled = false;
this.responseHead = null;
this.doneCB = doneCB;
}
writeHead(statusCode, statusMessage, header) {
this.responseHead = {
statusCode,
statusMessage,
header,
};
}
write(data) {
this.body = data;
}
end(cb) {
if (this.endCalled) {
return;
}
this.endCalled = true;
process.nextTick(() => {
cb(null);
this.doneCB(null, this, JSON.parse(this.body));
});
}
}
class DummyProxyRequest {
/**
* Create a new instance of this dummy class
*
* This dummy class implements the minimum feature set
* of the class http.IncomingMessage suitable for the
* arsenal.storage.metadata.proxy.BucketdRoutes test
* without using an actual http server.
*
* @param {object} params - parameter set describing the intended request
* @param {string} params.method - http method to fake
* @param {string} params.url - url to fake
* @param {string} params.body - request body to fake
* @param {boolean} params.json - if set, assume the body to be a JSON
* value to be serialized
* @param {object} params.headers - request headers to fake
*/
constructor(params) {
this.method = params.method;
this.url = params.url;
this.json = params.json;
this.body = new Buffer(
this.json ? JSON.stringify(params.body) : (params.body || ''));
this.headers = params.headers;
this.socket = {
remoteAddress: '127.0.0.1',
remotePort: 32769,
};
this.dataConsummed = false;
this.endCB = null;
}
/**
* bind a callback to a particular event on the request processing
*
* @param {string} event - one of 'data', 'end' or 'error'
* @param {function} callback - a function suitable for the associated event
* @returns {object} this
*/
on(event, callback) {
switch (event) {
case 'data':
process.nextTick(() => {
callback(this.body);
this.dataConsummed = true;
if (this.endCB) {
this.endCB();
}
});
break;
case 'end':
if (!this.dataConsummed) {
this.endCB = callback;
} else {
process.nextTick(() => {
callback();
});
}
break;
case 'error':
// never happens with this mock class
break;
default:
process.nextTick(() => callback(new Error(
`Unsupported DummyProxyRequest.on event '${event}'`)));
}
return this;
}
}
class RequestDispatcher {
/**
* Construct a new RequestDispatcher object.
*
* This class connects the provided Routes class to a dummy interface
* that enables tests to perform requests without using an actual http
* server.
*
* @param {object} routes - an instance of a Routes dispatcher class
*/
constructor(routes) {
this.routes = routes;
}
/**
* fake a POST request on the associated Routes dispatcher
*
* @param {string} path - the path of the object to be posted
* @param {object} objectMD - the metadata to post for this object
* @param {function} callback - called once the request has been processed
* with these parameters (err)
* @returns {undefined}
*/
post(path, objectMD, callback) {
this.routes.dispatch(new DummyProxyRequest({
method: 'POST',
url: path,
json: true,
body: objectMD,
headers: {},
}), new DummyProxyResponse(callback));
}
/**
* fake a GET request on the associated Routes dispatcher
*
* @param {string} path - the path of the object to be retrieved
* @param {function} callback - called once the request has been processed
* with these parameters (err, response, body)
* @returns {undefined}
*/
get(path, callback) {
this.routes.dispatch(new DummyProxyRequest({
method: 'GET',
url: path,
json: true,
body: '',
headers: {},
}), new DummyProxyResponse(callback));
}
/**
* fake a DELETE request on the associated Routes dispatcher
*
* @param {string} path - the path of the object to be deleted
* @param {function} callback - called once the request has been processed
* with these parameters (err)
* @returns {undefined}
*/
delete(path, callback) {
this.routes.dispatch(new DummyProxyRequest({
method: 'DELETE',
url: path,
json: true,
body: '',
headers: {},
}), new DummyProxyResponse(callback));
}
}
module.exports = { RequestDispatcher };
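A usage sketch (hypothetical): the dispatcher is constructed around a Routes-like object exposing dispatch(req, res), such as the BucketdRoutes class mentioned in the JSDoc above; the path format shown here is an assumption, not taken from this diff.
const dispatcher = new RequestDispatcher(routes);
dispatcher.post('/default/bucket/foo/key1', { 'content-length': 0 }, err => {
    // metadata stored; err is null on success
});
dispatcher.get('/default/bucket/foo/key1', (err, response, body) => {
    // body is the JSON-parsed metadata written by the dummy response
});
dispatcher.delete('/default/bucket/foo/key1', err => {
    // metadata removed
});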