Compare commits

11 Commits
developmen ... user/jbert

Author | SHA1 | Date
---|---|---
Guillaume Hivert | a14037f518 |
Guillaume Hivert | 9d7f041508 |
Jordi Bertran de Balanda | 61da23c8d2 |
Jordi Bertran de Balanda | 65c94012c6 |
Jordi Bertran de Balanda | cb87471cc8 |
Jordi Bertran de Balanda | 57e4ecc0ee |
Jordi Bertran de Balanda | 7a390e684d |
Jordi Bertran de Balanda | bbcd33a14d |
Jordi Bertran de Balanda | 88311ffbd9 |
Jordi Bertran de Balanda | be1e0fc56f |
Jordi Bertran de Balanda | 9a1a56e7b9 |
@@ -0,0 +1,7 @@
+{
+    "extends": ["scality"],
+    "plugins": ["jest"],
+    "env": {
+        "jest/globals": true
+    }
+}
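Note: the `env` entry is what lets the migrated test files use Jest's globals without tripping ESLint's `no-undef`. A minimal sketch (hypothetical file, not part of this diff):

```js
// With "jest/globals": true, ESLint accepts describe/it/expect
// without imports or /* global */ pragmas.
describe('sanity', () => {
    it('adds numbers', () => {
        expect(1 + 1).toBe(2);
    });
});
```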
@@ -0,0 +1,8 @@
+module.exports = {
+    presets: [
+        ['@babel/preset-env', { targets: { node: 'current' } }],
+        '@babel/preset-typescript',
+    ],
+    plugins: ['add-module-exports'],
+};
+
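Jest transforms sources through babel-jest, which picks this file up automatically: `@babel/preset-typescript` strips type annotations at test time (type checking stays with the `tsc`-based `compile` script added to package.json below), and `@babel/preset-env` targets the running Node version. A sketch under those assumptions — the file is hypothetical, not part of this diff:

```ts
// math.test.ts — hypothetical test file; @babel/preset-typescript
// erases the annotations, then Jest runs the plain JS that remains.
function add(a: number, b: number): number {
    return a + b;
}

test('add sums its arguments', () => {
    expect(add(2, 3)).toBe(5);
});
```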
@@ -1,7 +1,6 @@
 const ArrayUtils = require('./ArrayUtils');
 
 class SortedSet {
-
     constructor(obj) {
         if (obj) {
            this.keys = obj.keys;
@@ -91,7 +91,7 @@ class Vault {
             requestContext: serializedRCsArr,
         },
         (err, userInfo) => vaultSignatureCb(err, userInfo,
-            params.log, callback)
+            params.log, callback),
         );
     }
 
@@ -146,7 +146,7 @@ class Vault {
             requestContext: serializedRCs,
         },
         (err, userInfo) => vaultSignatureCb(err, userInfo,
-            params.log, callback, streamingV4Params)
+            params.log, callback, streamingV4Params),
         );
     }
 
@@ -194,7 +194,7 @@ function generateV4Headers(request, data, accessKey, secretKeyValue,
         .filter(headerName =>
             headerName.startsWith('x-amz-')
             || headerName.startsWith('x-scal-')
-            || headerName === 'host'
+            || headerName === 'host',
         ).sort().join(';');
     const params = { request, signedHeaders, payloadChecksum,
         credentialScope, timestamp, query: data,
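The bulk of the source changes in this compare are mechanical: a trailing comma is added to the last element of every multiline call, array, and object literal. That is the usual effect of enabling ESLint's `comma-dangle` rule for multiline constructs — presumably via the eslint/`eslint-config-scality` upgrade in package.json below — along these lines (a sketch of the rule, not the actual shared config):

```js
// Hypothetical .eslintrc.js fragment reproducing the pattern seen in
// the hunks that follow: multiline literals and argument lists must
// end with a trailing comma; single-line ones must not.
module.exports = {
    rules: {
        'comma-dangle': ['error', {
            arrays: 'always-multiline',
            objects: 'always-multiline',
            imports: 'always-multiline',
            exports: 'always-multiline',
            functions: 'always-multiline',
        }],
    },
};
```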
@@ -29,7 +29,7 @@ class ChainBackend extends BaseBackend {
             typeof client.getCanonicalIds === 'function' &&
             typeof client.getEmailAddresses === 'function' &&
             typeof client.checkPolicies === 'function' &&
-            typeof client.healthcheck === 'function'
+            typeof client.healthcheck === 'function',
         ), 'invalid client: missing required auth backend methods');
         this._clients = clients;
     }
 
@@ -55,7 +55,7 @@ class ChainBackend extends BaseBackend {
             signatureFromRequest,
             accessKey,
             options,
-            done
+            done,
         ), callback);
     }
 
@@ -67,7 +67,7 @@ class ChainBackend extends BaseBackend {
             region,
             scopeDate,
             options,
-            done
+            done,
         ), callback);
     }
 
@@ -153,7 +153,7 @@ class ChainBackend extends BaseBackend {
             requestContextParams,
             userArn,
             options,
-            done
+            done,
         ), (err, res) => {
             if (err) {
                 return callback(err);
 
@@ -171,7 +171,7 @@ class ChainBackend extends BaseBackend {
             client.healthcheck(reqUid, (err, res) => done(null, {
                 error: !!err ? err : null,
                 status: res,
-            })
+            }),
         ), (err, res) => {
             if (err) {
                 return callback(err);
@@ -273,7 +273,7 @@ class V4Transform extends Transform {
                 }
                 // get next chunk
                 return callback();
-            }
+            },
         );
     }
 }
@@ -17,7 +17,7 @@ class RedisClient {
             method: 'RedisClient.constructor',
             redisHost: config.host,
             redisPort: config.port,
-        })
+        }),
         );
         return this;
     }
@@ -9,7 +9,6 @@ const StatsClient = require('./StatsClient');
  * rather than by seconds
  */
 class StatsModel extends StatsClient {
-
     /**
      * Utility method to convert 2d array rows to columns, and vice versa
      * See also: https://docs.ruby-lang.org/en/2.0.0/Array.html#method-i-zip
@@ -1128,7 +1128,7 @@ class LifecycleConfiguration {
                 `<NoncurrentDays>${noncurrentDays}` +
                 '</NoncurrentDays>',
                 `<StorageClass>${storageClass}</StorageClass>`,
-                `</${actionName}>`
+                `</${actionName}>`,
             );
         });
         Action = xml.join('');
 
@@ -1148,7 +1148,7 @@ class LifecycleConfiguration {
                 `<${actionName}>`,
                 element,
                 `<StorageClass>${storageClass}</StorageClass>`,
-                `</${actionName}>`
+                `</${actionName}>`,
             );
         });
         Action = xml.join('');
@@ -27,7 +27,7 @@ const errors = require('../errors');
  * </NotificationConfiguration>
  */
 
-/**
+/**
  * Format of config:
  *
  * config = {
@@ -17,7 +17,7 @@ const errors = require('../errors');
  * </ObjectLockConfiguration>
  */
 
-/**
+/**
  * Format of config:
  *
  * config = {
@@ -10,7 +10,6 @@ const ObjectMDLocation = require('./ObjectMDLocation');
  * mpuPart metadata for example)
  */
 class ObjectMD {
-
     /**
      * Create a new instance of ObjectMD. Parameter <tt>objMd</tt> is
      * reserved for internal use, users should call
@@ -3,7 +3,6 @@
  * 'location' array
  */
 class ObjectMDLocation {
-
     /**
      * @constructor
      * @param {object} locationObj - single data location info
@@ -10,7 +10,6 @@ const { checkSupportIPv6 } = require('./utils');
 
 
 class Server {
-
     /**
      * @constructor
      *
@@ -342,8 +342,6 @@ class KMIP {
             return cb(null, response);
         });
     }
-
-
 }
 
@@ -26,7 +26,7 @@ function sendError(res, log, error, optMessage) {
             httpCode: error.code,
             errorType: error.message,
             error: message,
-        }
+        },
         );
         res.writeHead(error.code);
         res.end(JSON.stringify({
@@ -45,7 +45,6 @@ function sendError(res, log, error, optMessage) {
  * start() to start listening to the configured port.
  */
 class RESTServer extends httpServer {
-
     /**
      * @constructor
      * @param {Object} params - constructor params
@@ -17,7 +17,6 @@ const rpc = require('./rpc.js');
  * RPC client object accessing the sub-level transparently.
  */
 class LevelDbClient extends rpc.BaseClient {
-
     /**
      * @constructor
      *
@@ -78,7 +77,6 @@ class LevelDbClient extends rpc.BaseClient {
  * env.subDb (env is passed as first parameter of received RPC calls).
  */
 class LevelDbService extends rpc.BaseService {
-
     /**
      * @constructor
      *
@@ -37,7 +37,6 @@ let streamRPCJSONObj;
  * an error occurred).
  */
 class BaseClient extends EventEmitter {
-
     /**
      * @constructor
      *
@@ -251,7 +250,6 @@ class BaseClient extends EventEmitter {
  *
  */
 class BaseService {
-
     /**
      * @constructor
      *
@@ -51,10 +51,10 @@ wildcards.handleWildcardInResource = arn => {
     // Wildcards can be part of the resource ARN.
     // Wildcards do NOT span segments of the ARN (separated by ":")
 
-    // Example: all elements in specific bucket:
-    // "Resource": "arn:aws:s3:::my_corporate_bucket/*"
-    // ARN format:
-    // arn:partition:service:region:namespace:relative-id
+    // Example: all elements in specific bucket:
+    // "Resource": "arn:aws:s3:::my_corporate_bucket/*"
+    // ARN format:
+    // arn:partition:service:region:namespace:relative-id
     const arnArr = arn.split(':');
     return arnArr.map(portion => wildcards.handleWildcards(portion));
 };
@@ -6,7 +6,6 @@ const crypto = require('crypto');
  * data through a stream
  */
 class MD5Sum extends Transform {
-
     /**
      * @constructor
      */
@@ -40,7 +39,6 @@ class MD5Sum extends Transform {
         this.emit('hashed');
         callback(null);
     }
-
 }
 
 module.exports = MD5Sum;
@@ -73,7 +73,7 @@ class ResultsCollector extends EventEmitter {
  * @property {Error} [results[].error] - error returned by Azure putting subpart
  * @property {number} results[].subPartIndex - index of the subpart
  */
-/**
+/**
  * "error" event
  * @event ResultCollector#error
  * @type {(Error|undefined)} error - error returned by Azure last subpart
@@ -94,7 +94,7 @@ azureMpuUtils.getSubPartIds = (part, uploadId) =>
     azureMpuUtils.getBlockId(uploadId, part.partNumber, subPartIndex));
 
 azureMpuUtils.putSinglePart = (errorWrapperFn, request, params, dataStoreName,
-log, cb) => {
+    log, cb) => {
     const { bucketName, partNumber, size, objectKey, contentMD5, uploadId }
         = params;
     const blockId = azureMpuUtils.getBlockId(uploadId, partNumber, 0);
 
@@ -121,7 +121,7 @@ log, cb) => {
             return cb(errors.BadDigest);
         }
         return cb(errors.InternalError.customizeDescription(
-            `Error returned from Azure: ${err.message}`)
+            `Error returned from Azure: ${err.message}`),
         );
     }
     const md5 = result.headers['content-md5'] || '';
 
@@ -131,7 +131,7 @@ log, cb) => {
 };
 
 azureMpuUtils.putNextSubPart = (errorWrapperFn, partParams, subPartInfo,
-subPartStream, subPartIndex, resultsCollector, log, cb) => {
+    subPartStream, subPartIndex, resultsCollector, log, cb) => {
     const { uploadId, partNumber, bucketName, objectKey } = partParams;
     const subPartSize = azureMpuUtils.getSubPartSize(
         subPartInfo, subPartIndex);
 
@@ -144,7 +144,7 @@ subPartStream, subPartIndex, resultsCollector, log, cb) => {
 };
 
 azureMpuUtils.putSubParts = (errorWrapperFn, request, params,
-dataStoreName, log, cb) => {
+    dataStoreName, log, cb) => {
     const subPartInfo = azureMpuUtils.getSubPartInfo(params.size);
     const resultsCollector = new ResultsCollector();
     const hashedStream = new MD5Sum();
@@ -33,7 +33,7 @@ convertMethods.listMultipartUploads = xmlParams => {
     xml.push('<?xml version="1.0" encoding="UTF-8"?>',
         '<ListMultipartUploadsResult ' +
             'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
-        `<Bucket>${escapeForXml(xmlParams.bucketName)}</Bucket>`
+        `<Bucket>${escapeForXml(xmlParams.bucketName)}</Bucket>`,
     );
 
     // For certain XML elements, if it is `undefined`, AWS returns either an
 
@@ -58,7 +58,7 @@ convertMethods.listMultipartUploads = xmlParams => {
     });
 
     xml.push(`<MaxUploads>${escapeForXml(l.MaxKeys)}</MaxUploads>`,
-        `<IsTruncated>${escapeForXml(l.IsTruncated)}</IsTruncated>`
+        `<IsTruncated>${escapeForXml(l.IsTruncated)}</IsTruncated>`,
     );
 
     l.Uploads.forEach(upload => {
 
@@ -84,14 +84,14 @@ convertMethods.listMultipartUploads = xmlParams => {
             `<StorageClass>${escapeForXml(val.StorageClass)}` +
                 '</StorageClass>',
             `<Initiated>${escapeForXml(val.Initiated)}</Initiated>`,
-            '</Upload>'
+            '</Upload>',
         );
     });
 
     l.CommonPrefixes.forEach(prefix => {
         xml.push('<CommonPrefixes>',
             `<Prefix>${escapeForXml(prefix)}</Prefix>`,
-            '</CommonPrefixes>'
+            '</CommonPrefixes>',
         );
     });
 
@@ -5,7 +5,6 @@ const Readable = require('stream').Readable;
  * This class is used to produce zeros filled buffers for a reader consumption
  */
 class NullStream extends Readable {
-
     /**
      * Construct a new zeros filled buffers producer that will
      * produce as much bytes as specified by the range parameter, or the size
@@ -110,7 +110,7 @@ function generateMpuPartStorageInfo(filteredPartList) {
  * and extraPartLocations
  */
 function validateAndFilterMpuParts(storedParts, jsonList, mpuOverviewKey,
-splitter, log) {
+    splitter, log) {
     let storedPartsCopy = [];
     const filteredPartsObj = {};
     filteredPartsObj.partList = [];
@@ -2,7 +2,7 @@ const errors = require('../../errors');
 const routesUtils = require('../routesUtils');
 
 function routerGET(request, response, api, log, statsClient,
-dataRetrievalParams) {
+    dataRetrievalParams) {
     log.debug('routing request', { method: 'routerGET' });
     if (request.bucketName === undefined && request.objectKey !== undefined) {
         routesUtils.responseXMLBody(errors.NoSuchBucket, null, response, log);
@@ -118,7 +118,7 @@ const XMLResponseBackend = {
         `<Message>${errCode.description}</Message>`,
         '<Resource></Resource>',
         `<RequestId>${log.getSerializedUids()}</RequestId>`,
-        '</Error>'
+        '</Error>',
     );
     const xmlStr = xml.join('');
     const bytesSent = Buffer.byteLength(xmlStr);
 
@@ -376,7 +376,7 @@ function retrieveData(locations, retrieveDataParams, response, log) {
             // call end for all cases (error/success) per node.js docs
             // recommendation
             response.end();
-        }
+        },
         );
     }
 
@@ -591,7 +591,7 @@ const routesUtils = {
             `<h1>${err.code} ${response.statusMessage}</h1>`,
             '<ul>',
             `<li>Code: ${err.message}</li>`,
-            `<li>Message: ${err.description}</li>`
+            `<li>Message: ${err.description}</li>`,
         );
 
         if (!userErrorPageFailure && bucketName) {
 
@@ -601,7 +601,7 @@ const routesUtils = {
             `<li>RequestId: ${log.getSerializedUids()}</li>`,
             // AWS response contains HostId here.
            // TODO: consider adding
-            '</ul>'
+            '</ul>',
         );
         if (userErrorPageFailure) {
             html.push(
 
@@ -611,13 +611,13 @@ const routesUtils = {
                 '<ul>',
                 `<li>Code: ${err.message}</li>`,
                 `<li>Message: ${err.description}</li>`,
-                '</ul>'
+                '</ul>',
             );
         }
         html.push(
             '<hr/>',
             '</body>',
-            '</html>'
+            '</html>',
         );
 
         return response.end(html.join(''), 'utf8', () => {
 
@@ -847,7 +847,7 @@ const routesUtils = {
             return bucketName;
         }
         throw new Error(
-            `bad request: hostname ${host} is not in valid endpoints`
+            `bad request: hostname ${host} is not in valid endpoints`,
         );
     },
 
@@ -11,7 +11,7 @@ const { createLogger, logHelper, removeQuotes, trimXMetaPrefix } =
 
 const missingVerIdInternalError = errors.InternalError.customizeDescription(
     'Invalid state. Please ensure versioning is enabled ' +
-    'in AWS for the location constraint and try again.'
+    'in AWS for the location constraint and try again.',
 );
 
 class AwsClient {
 
@@ -94,7 +94,7 @@ class AwsClient {
                 err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
             );
         }
         if (!data.VersionId && this._supportsVersioning) {
 
@@ -233,7 +233,7 @@ class AwsClient {
             }
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
             );
         }
         return callback();
 
@@ -307,7 +307,7 @@ class AwsClient {
                 err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
             );
         }
         return callback(null, mpuResObj);
 
@@ -335,7 +335,7 @@ class AwsClient {
                 'on uploadPart', err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
             );
         }
         // Because we manually add quotes to ETag later, remove quotes here
 
@@ -363,7 +363,7 @@ class AwsClient {
                 err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
             );
         }
         // build storedParts object to mimic Scality S3 backend returns
 
@@ -435,7 +435,7 @@ class AwsClient {
                 'completeMPU', err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
             );
         }
         if (!completeMpuRes.VersionId && this._supportsVersioning) {
 
@@ -453,7 +453,7 @@ class AwsClient {
                 'headObject', err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
             );
         }
         // remove quotes from eTag because they're added later
 
@@ -481,7 +481,7 @@ class AwsClient {
                 this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
             );
         }
         return callback();
 
@@ -510,7 +510,7 @@ class AwsClient {
                 this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
            );
         }
         return callback();
 
@@ -533,7 +533,7 @@ class AwsClient {
                 this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
             );
         }
         return callback();
 
@@ -570,14 +570,14 @@ class AwsClient {
                     this._dataStoreName, this.clientType);
                 return callback(errors.AccessDenied
                     .customizeDescription('Error: Unable to access ' +
-                    `${sourceAwsBucketName} ${this.type} bucket`)
+                    `${sourceAwsBucketName} ${this.type} bucket`),
                 );
             }
             logHelper(log, 'error', 'error from data backend on ' +
                 'copyObject', err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
             );
         }
         if (!copyResult.VersionId && this._supportsVersioning) {
 
@@ -629,14 +629,14 @@ class AwsClient {
                    this._dataStoreName, this.clientType);
                 return callback(errors.AccessDenied
                     .customizeDescription('Error: Unable to access ' +
-                    `${sourceAwsBucketName} AWS bucket`)
+                    `${sourceAwsBucketName} AWS bucket`),
                 );
             }
             logHelper(log, 'error', 'error from data backend on ' +
                 'uploadPartCopy', err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `${this.type}: ${err.message}`)
+                `${this.type}: ${err.message}`),
             );
         }
         const eTag = removeQuotes(res.CopyPartResult.ETag);
@@ -422,14 +422,14 @@ class AzureClient {
                     this._dataStoreName);
                 return callback(errors.AccessDenied
                     .customizeDescription('Error: Unable to access ' +
-                    `${sourceContainerName} Azure Container`)
+                    `${sourceContainerName} Azure Container`),
                 );
             }
             logHelper(log, 'error', 'error from data backend on ' +
                 'copyObject', err, this._dataStoreName);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `AWS: ${err.message}`)
+                `AWS: ${err.message}`),
             );
         }
         if (res.copy.status === 'pending') {
 
@@ -443,12 +443,12 @@ class AzureClient {
                     'on abortCopyBlob', err, this._dataStoreName);
                 return callback(errors.ServiceUnavailable
                     .customizeDescription('Error returned from ' +
-                    `AWS on abortCopyBlob: ${err.message}`)
+                    `AWS on abortCopyBlob: ${err.message}`),
                 );
             }
             return callback(errors.InvalidObjectState
                 .customizeDescription('Error: Azure copy status was ' +
-                'pending. It has been aborted successfully')
+                'pending. It has been aborted successfully'),
             );
         });
     }
@@ -123,7 +123,7 @@ class GcpClient extends AwsClient {
                 err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `GCP: ${err.message}`)
+                `GCP: ${err.message}`),
             );
         }
         return callback(null, mpuResObj);
 
@@ -168,7 +168,7 @@ class GcpClient extends AwsClient {
                 'completeMPU', err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `GCP: ${err.message}`)
+                `GCP: ${err.message}`),
             );
         }
         if (!completeMpuRes.VersionId) {
 
@@ -210,7 +210,7 @@ class GcpClient extends AwsClient {
                 'on uploadPart', err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `GCP: ${err.message}`)
+                `GCP: ${err.message}`),
             );
         }
         // remove quotes from eTag because they're added later
 
@@ -242,7 +242,7 @@ class GcpClient extends AwsClient {
         if (copySourceRange) {
             return callback(errors.NotImplemented
                 .customizeDescription('Error returned from ' +
-                `${this.clientType}: copySourceRange not implemented`)
+                `${this.clientType}: copySourceRange not implemented`),
             );
         }
 
@@ -261,14 +261,14 @@ class GcpClient extends AwsClient {
                     this._dataStoreName, this.clientType);
                 return callback(errors.AccessDenied
                     .customizeDescription('Error: Unable to access ' +
-                    `${sourceGcpBucketName} GCP bucket`)
+                    `${sourceGcpBucketName} GCP bucket`),
                 );
             }
             logHelper(log, 'error', 'error from data backend on ' +
                 'uploadPartCopy', err, this._dataStoreName);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `GCP: ${err.message}`)
+                `GCP: ${err.message}`),
             );
         }
         // remove quotes from eTag because they're added later
 
@@ -291,7 +291,7 @@ class GcpClient extends AwsClient {
                 'on abortMPU', err, this._dataStoreName, this.clientType);
             return callback(errors.ServiceUnavailable
                 .customizeDescription('Error returned from ' +
-                `GCP: ${err.message}`)
+                `GCP: ${err.message}`),
             );
         }
         return callback();
@@ -45,7 +45,7 @@ class PfsClient {
         }
         return callback(null, keyContext.objectKey, '',
             keyContext.metaHeaders['x-amz-meta-size'],
-            md5
+            md5,
         );
     }
     logHelper(log, 'error', 'Not implemented', errors.NotImplemented,
@@ -35,7 +35,6 @@ const FOLDER_HASH = 3511;
  * directory hash structure under the configured dataPath.
  */
 class DataFileStore {
-
     /**
      * @constructor
      * @param {Object} dataConfig - configuration of the file backend
@@ -30,7 +30,6 @@ class ListRecordStream extends stream.Transform {
  * @classdesc Proxy object to access raft log API
  */
 class LogConsumer {
-
     /**
      * @constructor
     *
@@ -14,13 +14,13 @@ const _operatorType1 = joi.string().valid(
     '$gt',
     '$gte',
     '$lt',
-    '$lte'
+    '$lte',
 );
 
 // supports strings, numbers, and boolean
 const _operatorType2 = joi.string().valid(
     '$eq',
-    '$ne'
+    '$ne',
 );
 
 const _valueType1 = joi.alternatives([
@@ -17,7 +17,6 @@ const METASTORE = '__metastore';
 const itemScanRefreshDelay = 1000 * 30 * 60; // 30 minutes
 
 class BucketFileInterface {
-
     /**
      * @constructor
     * @param {object} [params] - constructor params
@@ -8,7 +8,6 @@ const { RecordLogProxy } = require('./RecordLog.js');
 const werelogs = require('werelogs');
 
 class MetadataFileClient {
-
     /**
      * Construct a metadata client
     *
@@ -25,7 +25,6 @@ const SYNC_OPTIONS = { sync: true };
 const SUBLEVEL_SEP = '::';
 
 class MetadataFileServer {
-
     /**
      * Construct a metadata server
     *
@@ -18,7 +18,6 @@ const DEFAULT_RECORD_LOG_NAME = 's3-recordlog';
  * object.
  */
 class RecordLogProxy extends rpc.BaseClient {
-
     constructor(params) {
         super(params);
 
@@ -102,7 +101,6 @@ class ListRecordStream extends stream.Transform {
  * updates can be transactional with each other.
  */
 class RecordLogService extends rpc.BaseService {
-
     /**
      * @constructor
     *
@@ -9,7 +9,6 @@ const MongoUtils = require('./utils');
 * @classdesc Class to consume mongo oplog
 */
 class LogConsumer {
-
     /**
      * @constructor
     *
@@ -538,7 +538,7 @@ class MongoClientInterface {
             updateOne: {
                 // eslint-disable-next-line
                 filter: {
-                    _id: objName,
+                    '_id': objName,
                     'value.versionId': params.versionId,
                 },
                 update: {
 
@@ -607,7 +607,7 @@ class MongoClientInterface {
             MongoUtils.serialize(mstObjVal);
             // eslint-disable-next-line
             c.update({
-                _id: objName,
+                '_id': objName,
                 'value.versionId': {
                     // We break the semantic correctness here with
                     // $gte instead of $gt because we do not have
 
@@ -760,7 +760,7 @@ class MongoClientInterface {
             MongoUtils.serialize(objVal);
             // eslint-disable-next-line
             c.findOneAndReplace({
-                _id: objName,
+                '_id': objName,
                 'value.isPHD': true,
                 'value.versionId': mst.versionId,
             }, {
 
@@ -822,7 +822,7 @@ class MongoClientInterface {
            // version:
             // eslint-disable-next-line
             c.findOneAndDelete({
-                _id: objName,
+                '_id': objName,
                 'value.isPHD': true,
                 'value.versionId': mst.versionId,
             }, {}, err => {
 
@@ -1616,7 +1616,7 @@ class MongoClientInterface {
                 const retResult = this._handleResults(collRes, isVer);
                 retResult.stalled = stalledCount;
                 return callback(null, retResult);
-            }
+            },
         );
     }
 
@@ -11,7 +11,6 @@ const requiresOneWorker = {
 };
 
 class Server {
-
     /**
      * Create a new Metadata Proxy Server instance
     *
@@ -160,7 +160,7 @@ class TestMatrix {
         const result = Object.keys(matrixChild.params)
             .every(currentKey =>
                 Object.prototype.toString.call(
-                    matrixChild.params[currentKey]
+                    matrixChild.params[currentKey],
                 ).indexOf('Array') === -1);
 
         if (result === true) {
package.json

@@ -33,7 +33,7 @@
     "fcntl": "github:scality/node-fcntl#0.2.0",
     "hdclient": "scality/hdclient#1.1.0",
     "https-proxy-agent": "^2.2.0",
-    "ioredis": "4.9.5",
+    "ioredis": "~4.28.3",
     "ipaddr.js": "1.9.1",
     "level": "~5.0.1",
     "level-sublevel": "~6.6.5",
 
@@ -53,26 +53,56 @@
         "ioctl": "^2.0.2"
     },
     "devDependencies": {
+        "@babel/core": "^7.0.0-0",
+        "@babel/preset-env": "^7.16.5",
+        "@babel/preset-typescript": "^7.16.5",
         "@sinonjs/fake-timers": "^6.0.1",
-        "eslint": "2.13.1",
+        "@types/jest": "^27.0.3",
+        "@types/node": "^16.11.7",
+        "babel-plugin-add-module-exports": "^1.0.4",
+        "eslint": "^7.32.0",
         "eslint-config-airbnb": "6.2.0",
         "eslint-config-scality": "scality/Guidelines#ec33dfb",
+        "eslint-plugin-jest": "^24.7.0",
         "eslint-plugin-react": "^4.3.0",
+        "jest": "^27.4.5",
         "mocha": "8.0.1",
         "mongodb-memory-server": "^6.0.2",
         "nyc": "^15.1.0",
         "sinon": "^9.0.2",
-        "temp": "0.9.1"
+        "temp": "0.9.1",
+        "typescript": "^4.4.4"
     },
     "scripts": {
         "lint": "eslint $(git ls-files '*.js')",
         "lint_md": "mdlint $(git ls-files '*.md')",
         "lint_yml": "yamllint $(git ls-files '*.yml')",
-        "test": "mocha --recursive --exit tests/unit",
-        "ft_test": "find tests/functional -name \"*.js\" | grep -v \"utils/\" | xargs mocha --timeout 120000 --exit",
+        "test": "jest tests/unit",
+        "ft_test": "jest tests/functional --forceExit",
         "coverage": "yarn coverage_unit && yarn coverage_ft && yarn coverage_report",
-        "coverage_unit": "nyc --silent yarn test",
-        "coverage_ft": "nyc --silent --no-clean yarn ft_test",
-        "coverage_report": "nyc report --all --reporter=text-summary --reporter=lcov"
+        "coverage_unit": "yarn test --coverage",
+        "coverage_ft": "yarn ft_test --coverage",
+        "coverage_report": "nyc report --all --reporter=text-summary --reporter=lcov",
+        "compile": "tsc"
     },
+    "jest": {
+        "collectCoverage": true,
+        "maxWorkers": 1,
+        "roots": [
+            ".",
+            "lib",
+            "errors"
+        ],
+        "testPathIgnorePatterns": [
+            "/node_modules/",
+            "helpers?\\.js",
+            "Mock.*\\.js",
+            "Dummy.*\\.js",
+            "tests/functional/.*/utils\\.js",
+            "tests/functional/.*/utils/.*\\.js"
+        ],
+        "testMatch": [
+            "**/*.js"
+        ]
+    }
 }
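The new `jest` block keeps coverage collection in Jest itself (the old scripts wrapped mocha in nyc), and `maxWorkers: 1` runs test files serially, matching the old single-process mocha runs. The same settings as a standalone file would look like this — a hypothetical `jest.config.js`; the PR keeps them inline in package.json:

```js
// jest.config.js — equivalent to the "jest" key added above.
module.exports = {
    collectCoverage: true, // replaces the nyc wrappers in the old scripts
    maxWorkers: 1,         // run test files one at a time, like mocha did
    roots: ['.', 'lib', 'errors'],
    testPathIgnorePatterns: [
        '/node_modules/',
        'helpers?\\.js',
        'Mock.*\\.js',
        'Dummy.*\\.js',
        'tests/functional/.*/utils\\.js',
        'tests/functional/.*/utils/.*\\.js',
    ],
    // every non-ignored .js file under the roots counts as a test file
    testMatch: ['**/*.js'],
};
```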
@@ -41,7 +41,7 @@ describe('KMIP Low Level Driver', () => {
                 return done(err);
             }
             const responsePayload = response.lookup(
-                'Response Message/Batch Item/Response Payload'
+                'Response Message/Batch Item/Response Payload',
             )[0];
             assert.deepStrictEqual(responsePayload,
                 requestPayload);
 
@@ -7,7 +7,7 @@ const { logger } = require('../../utils/kmip/ersatz.js');
 
 describe('KMIP Connection Management', () => {
     let server;
-    before(done => {
+    beforeAll(done => {
         server = net.createServer(conn => {
             // abort the connection as soon as it is accepted
             conn.destroy();
 
@@ -15,7 +15,7 @@ describe('KMIP Connection Management', () => {
         server.listen(5696);
         server.on('listening', done);
     });
-    after(done => {
+    afterAll(done => {
         server.close(done);
     });
 
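From here on, the test changes follow one recipe: Mocha's suite-level `before`/`after` hooks become Jest's `beforeAll`/`afterAll`, while `beforeEach`/`afterEach` keep their names in both frameworks. A runnable sketch of the rename (hypothetical suite, not part of this diff):

```js
// Mocha:  before(...)  / after(...)
// Jest:   beforeAll(...) / afterAll(...)
describe('lifecycle sketch', () => {
    let resource;
    beforeAll(() => {
        resource = { open: true }; // stand-in for real setup
    });
    afterAll(() => {
        resource.open = false;     // stand-in for real teardown
    });
    it('uses the resource', () => {
        expect(resource.open).toBe(true);
    });
});
```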
@@ -28,7 +28,7 @@ const mongoserver = new MongoMemoryReplSet({
 
 describe('MongoClientInterface', () => {
     let metadata;
-    before(done => {
+    beforeAll(done => {
         mongoserver.waitUntilRunning().then(() => {
             const opts = {
                 mongodb: {
 
@@ -44,7 +44,7 @@ describe('MongoClientInterface', () => {
         });
     });
 
-    after(done => {
+    afterAll(done => {
         async.series([
             next => metadata.close(next),
             next => mongoserver.stop()
@@ -152,8 +152,8 @@ function _deleteObjects(objects, cb) {
 }
 
 describe('Basic Metadata Proxy Server test',
-    function bindToThis() {
-        this.timeout(10000);
+    () => {
+        jest.setTimeout(10000);
         it('Shoud get the metadataInformation', done => {
             dispatcher.get('/default/metadataInformation',
                 (err, response, body) => {
 
@@ -167,8 +167,8 @@ describe('Basic Metadata Proxy Server test',
     });
 });
 
-describe('Basic Metadata Proxy Server CRUD test', function bindToThis() {
-    this.timeout(10000);
+describe('Basic Metadata Proxy Server CRUD test', () => {
+    jest.setTimeout(10000);
 
     beforeEach(done => {
         dispatcher.post(`/default/bucket/${Bucket}`, bucketInfo,
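Jest has no per-suite `this.timeout()`, so the migration swaps in `jest.setTimeout(ms)`, which resets the default timeout for the tests that run after it in the same file. Where a single slow test is the issue, Jest 27 also accepts a timeout as the third argument to `it` — a sketch, not part of this diff:

```js
jest.setTimeout(10000); // file-wide default from this point on

describe('slow suite', () => {
    // ...or scope the override to one test via the third argument:
    it('finishes a slow operation', done => {
        setTimeout(done, 50); // stand-in for real async work
    }, 15000);
});
```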
@@ -28,7 +28,7 @@ describe('StatsClient class', () => {
 
     afterEach(() => redisClient.clear(() => {}));
 
-    after(() => redisClient.disconnect());
+    afterAll(() => redisClient.disconnect());
 
     it('should correctly record a new request by default one increment',
         done => {
@ -85,11 +85,11 @@ describe('LRUCache', () => {
|
|||
assert.strictEqual(lru.get(100), undefined);
|
||||
});
|
||||
|
||||
it('max 1000000 entries', function lru1M() {
|
||||
it('max 1000000 entries', () => {
|
||||
// this test takes ~1-2 seconds on a laptop, nevertheless set a
|
||||
// large timeout to reduce the potential of flakiness on possibly
|
||||
// slower CI environment.
|
||||
this.timeout(30000);
|
||||
jest.setTimeout(30000);
|
||||
|
||||
const lru = new LRUCache(1000000);
|
||||
|
||||
|
|
|
@@ -85,7 +85,7 @@ const nonAlphabeticalData = [
 
 const receivedData = data.map(item => ({ key: item.key, value: item.value }));
 const receivedNonAlphaData = nonAlphabeticalData.map(
-    item => ({ key: item.key, value: item.value })
+    item => ({ key: item.key, value: item.value }),
 );
 
 const tests = [
@@ -189,8 +189,8 @@ describe('MergeStream', () => {
             `${usePauseResume ? ' with pause/resume' : ''}` +
             `${errorAtEnd ? ' with error' : ''}`;
         it(`${nbEntries} sequential entries${fixtureDesc}`,
-            function bigMergeSequential(done) {
-                this.timeout(10000);
+            (done) => {
+                jest.setTimeout(10000);
                 const stream1 = [];
                 const stream2 = [];
                 for (let i = 0; i < nbEntries; ++i) {
 
@@ -204,8 +204,8 @@ describe('MergeStream', () => {
                     stream1, stream2, usePauseResume, errorAtEnd, done);
             });
         it(`${nbEntries} randomly mingled entries${fixtureDesc}`,
-            function bigMergeRandom(done) {
-                this.timeout(10000);
+            (done) => {
+                jest.setTimeout(10000);
                 const stream1 = [];
                 const stream2 = [];
                 let accu = nbEntries;
@@ -257,7 +257,7 @@ describe('Auth Backend: Chain Backend', () => {
                     id3: 'email3@test.com',
                     // id4 should be overwritten
                     id4: 'email5@test.com',
-                }
+                },
             );
         });
     });
@@ -35,11 +35,11 @@ describe('convertUTCtoISO8601 function', () => {
 
 describe('checkTimeSkew function', () => {
     let clock;
-    before(() => {
+    beforeAll(() => {
         // Time is 2016-03-17T18:22:01.033Z
         clock = fakeTimers.install({ now: 1458238921033 });
     });
-    after(() => {
+    afterAll(() => {
         clock.uninstall();
     });
 
@@ -57,7 +57,6 @@ function zpad(key, length = 15) {
 }
 
 class DummyRequestLogger {
-
     constructor() {
         this.ops = [];
         this.counts = {
|
|||
[['192.168.1.1'], '192.168.1.1'],
|
||||
].forEach(item =>
|
||||
it(`should match IP ${item[0][0]} without CIDR range`,
|
||||
() => cidrListMatchCheck(item[0], item[1], true))
|
||||
() => cidrListMatchCheck(item[0], item[1], true)),
|
||||
);
|
||||
|
||||
it('should not range match if CIDR range is not provided',
|
||||
|
|
|
@ -9,7 +9,7 @@ describe('ZenkoMetrics', () => {
|
|||
let summary;
|
||||
let petCounter;
|
||||
|
||||
before(() => {
|
||||
beforeAll(() => {
|
||||
counter = ZenkoMetrics.createCounter({
|
||||
name: 'gizmo_counter',
|
||||
help: 'Count gizmos',
|
||||
|
|
|
@ -631,7 +631,7 @@ Object.keys(acl).forEach(
|
|||
dummyBucket.getUid(), testUid);
|
||||
});
|
||||
});
|
||||
})
|
||||
}),
|
||||
);
|
||||
|
||||
describe('uid default', () => {
|
||||
|
|
|
@ -1207,7 +1207,7 @@ describe('LifecycleConfiguration::getConfigJson', () => {
|
|||
`should return correct configuration: ${msg}`, () => {
|
||||
assert.deepStrictEqual(
|
||||
LifecycleConfiguration.getConfigJson(input),
|
||||
expected
|
||||
expected,
|
||||
);
|
||||
}));
|
||||
});
|
||||
|
|
|
@ -187,7 +187,7 @@ const passTests = [
|
|||
];
|
||||
|
||||
describe('NotificationConfiguration class getValidatedNotificationConfiguration',
|
||||
() => {
|
||||
() => {
|
||||
it('should return MalformedXML error if request xml is empty', done => {
|
||||
const errMessage = 'request xml is undefined or empty';
|
||||
checkError('', 'MalformedXML', errMessage, done);
|
||||
|
@ -211,4 +211,4 @@ describe('NotificationConfiguration class getValidatedNotificationConfiguration'
|
|||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
});
|
||||
|
|
|
@@ -228,7 +228,7 @@ const passTestsGetConfigXML = [
 ];
 
 describe('ObjectLockConfiguration class getValidatedObjectLockConfiguration',
-() => {
+    () => {
     it('should return MalformedXML error if request xml is empty', done => {
         const errMessage = 'request xml is undefined or empty';
         checkError('', 'MalformedXML', errMessage, done);
 
@@ -252,7 +252,7 @@ describe('ObjectLockConfiguration class getValidatedObjectLockConfiguration',
         });
     });
-});
+    });
 
 describe('ObjectLockConfiguration class getConfigXML', () => {
     passTestsGetConfigXML.forEach(test => {
@@ -34,11 +34,11 @@ describe('network.probe.HealthProbeServer', () => {
         server.start();
     }
 
-    before(done => {
+    beforeAll(done => {
         setup(done);
     });
 
-    after(done => {
+    afterAll(done => {
         server.stop();
         done();
     });
 
@@ -73,11 +73,11 @@ describe('network.probe.HealthProbeServer', () => {
         done();
     }
 
-    before(done => {
+    beforeAll(done => {
         setup(done);
     });
 
-    after(done => {
+    afterAll(done => {
         server.stop();
         done();
     });
 
@@ -108,11 +108,11 @@ describe('network.probe.HealthProbeServer', () => {
         done();
     }
 
-    before(done => {
+    beforeAll(done => {
         setup(done);
     });
 
-    after(done => {
+    afterAll(done => {
         server.stop();
         done();
     });
 
@@ -143,11 +143,11 @@ describe('network.probe.HealthProbeServer', () => {
         done();
     }
 
-    before(done => {
+    beforeAll(done => {
         setup(done);
     });
 
-    after(done => {
+    afterAll(done => {
         server.stop();
         done();
     });
 
@@ -176,11 +176,11 @@ describe('network.probe.HealthProbeServer', () => {
         server.start();
     }
 
-    before(done => {
+    beforeAll(done => {
         setup(done);
     });
 
-    after(done => {
+    afterAll(done => {
         server.stop();
         done();
     });
@@ -47,7 +47,7 @@ describe('network.probe.Utils', () => {
             {
                 errorType: 'MethodNotAllowed',
                 errorMessage: errors.MethodNotAllowed.description,
-            }
+            },
         );
         done();
     }),
 
@@ -64,7 +64,7 @@ describe('network.probe.Utils', () => {
             {
                 errorType: 'MethodNotAllowed',
                 errorMessage: 'Very much not allowed',
-            }
+            },
         );
         done();
     }),
@@ -53,11 +53,11 @@ describe('REST interface for blob data storage', () => {
         });
     }
 
-    before(done => {
+    beforeAll(done => {
         setup(done);
     });
 
-    after(done => {
+    afterAll(done => {
         server.stop();
         client.destroy();
         done();
 
@@ -135,7 +135,7 @@ describe('REST interface for blob data storage', () => {
                 `bytes ${expectedStart}-${expectedEnd}/${contents.length}`);
         }
 
-        before(done => {
+        beforeAll(done => {
             const rs = createReadStream(contents);
             client.put(rs, contents.length, '1', (err, key) => {
                 assert.ifError(err);
@@ -161,7 +161,7 @@ describe('level-net - LevelDB over network', () => {
         async.series(opList, cb);
     }
 
-    before(done => {
+    beforeAll(done => {
         temp.mkdir('level-net-testdb-', (err, dbDir) => {
             const rootDb = level(dbDir);
             db = sublevel(rootDb);
 
@@ -169,7 +169,7 @@ describe('level-net - LevelDB over network', () => {
         });
     });
 
-    after(done => {
+    afterAll(done => {
         client.once('disconnect', () => {
             server.close();
             done();
 
@@ -249,7 +249,7 @@ describe('level-net - LevelDB over network', () => {
                     .put(keyOfIter(i), valueOfIter(i), params, putCb);
             }
         }
-        before(done => {
+        beforeAll(done => {
             prefillKeys(done);
         });
         it('should be able to read keys back at random', done => {
@@ -53,11 +53,11 @@ describe('rpc - generic client/server RPC system', () => {
         miscClient.connect(done);
     }
 
-    before(done => {
+    beforeAll(done => {
         setupRPC(done);
     });
 
-    after(done => {
+    afterAll(done => {
         miscClient.once('disconnect', () => {
             server.close();
             done();
|
|||
patchLocations(
|
||||
{ [locationName]: locations },
|
||||
{ privateKey },
|
||||
mockLog
|
||||
mockLog,
|
||||
),
|
||||
{ [locationName]: expected }
|
||||
{ [locationName]: expected },
|
||||
);
|
||||
});
|
||||
});
|
||||
|
@ -330,9 +330,9 @@ describe('patch location constriants', () => {
|
|||
patchLocations(
|
||||
undefined,
|
||||
{ privateKey },
|
||||
mockLog
|
||||
mockLog,
|
||||
),
|
||||
{}
|
||||
{},
|
||||
);
|
||||
});
|
||||
|
||||
|
@ -345,9 +345,9 @@ describe('patch location constriants', () => {
|
|||
},
|
||||
},
|
||||
{ privateKey },
|
||||
mockLog
|
||||
mockLog,
|
||||
),
|
||||
{}
|
||||
{},
|
||||
);
|
||||
});
|
||||
});
|
||||
|
|
|
@@ -34,7 +34,7 @@ function getRuleIDs(rules) {
 describe('LifecycleUtils::getApplicableRules', () => {
     let lutils;
 
-    before(() => {
+    beforeAll(() => {
         lutils = new LifecycleUtils([
             'expiration',
             'noncurrentVersionExpiration',
 
@@ -434,7 +434,7 @@ describe('LifecycleUtils::getApplicableRules', () => {
 describe('LifecycleUtils::filterRules', () => {
     let lutils;
 
-    before(() => {
+    beforeAll(() => {
         lutils = new LifecycleUtils();
     });
 
@@ -522,7 +522,7 @@ describe('LifecycleUtils::filterRules', () => {
         const expRes1 = getRuleIDs(mBucketRules.filter(rule =>
             (rule.Filter && rule.Filter.Tag &&
                 rule.Filter.Tag.Key === 'tag1' &&
-                rule.Filter.Tag.Value === 'val1')
+                rule.Filter.Tag.Value === 'val1'),
         ));
         assert.deepStrictEqual(expRes1, getRuleIDs(res1));
 
@@ -532,7 +532,7 @@ describe('LifecycleUtils::filterRules', () => {
         const expRes2 = getRuleIDs(mBucketRules.filter(rule =>
             rule.Filter && rule.Filter.Tag &&
             rule.Filter.Tag.Key === 'tag3-1' &&
-            rule.Filter.Tag.Value === 'val3'
+            rule.Filter.Tag.Value === 'val3',
         ));
         assert.deepStrictEqual(expRes2, getRuleIDs(res2));
     });
 
@@ -619,7 +619,7 @@ describe('LifecycleUtils::filterRules', () => {
 describe('LifecycleUtils::getApplicableTransition', () => {
     let lutils;
 
-    before(() => {
+    beforeAll(() => {
         lutils = new LifecycleUtils();
     });
 
@@ -792,7 +792,7 @@ describe('LifecycleUtils::getApplicableTransition', () => {
 describe('LifecycleUtils::compareTransitions', () => {
     let lutils;
 
-    before(() => {
+    beforeAll(() => {
         lutils = new LifecycleUtils();
     });
 
|
|||
let httpServer;
|
||||
let awsClient;
|
||||
|
||||
before(done => {
|
||||
beforeAll(done => {
|
||||
awsClient = new AwsClient(awsConfig);
|
||||
httpServer = http.createServer((req, res) => {
|
||||
const objStream = new DummyObjectStream(0, 10000000);
|
||||
|
@ -47,7 +47,7 @@ describe('routesUtils.responseStreamData', () => {
|
|||
httpServer.on('error', err => assert.ifError(err));
|
||||
});
|
||||
|
||||
after(() => {
|
||||
afterAll(() => {
|
||||
httpServer.close();
|
||||
});
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@ describe('DummyObjectStream', () => {
|
|||
it('should return a stream of 8-byte hex-encoded blocks', async () => {
|
||||
// FIXME we likely need an eslint update
|
||||
/* eslint-disable no-unused-expressions */
|
||||
jest.setTimeout(30000);
|
||||
await testStream(0, 0, '');
|
||||
await testStream(50, 0, '');
|
||||
await testStream(0, 1, ' ');
|
||||
|
@ -53,5 +54,5 @@ describe('DummyObjectStream', () => {
|
|||
.join('');
|
||||
await testStream(567890123, 5 + 8 * 1024 * 1024 + 3, expectedLarge2);
|
||||
/* eslint-enable no-unused-expressions */
|
||||
}).timeout(30000);
|
||||
});
|
||||
});
|
||||
|
|
|
@ -54,7 +54,7 @@ describe('external backend clients', () => {
|
|||
backendClients.forEach(backend => {
|
||||
let testClient;
|
||||
|
||||
before(() => {
|
||||
beforeAll(() => {
|
||||
testClient = new backend.Class(backend.config);
|
||||
testClient._client = new DummyService({ versioning: true });
|
||||
});
|
||||
|
|
|
@ -87,12 +87,12 @@ const operations = [
|
|||
},
|
||||
];
|
||||
|
||||
describe('GcpService request behavior', function testSuite() {
|
||||
this.timeout(120000);
|
||||
describe('GcpService request behavior', () => {
|
||||
jest.setTimeout(120000);
|
||||
let httpServer;
|
||||
let client;
|
||||
|
||||
before(done => {
|
||||
beforeAll(done => {
|
||||
client = new GCP({
|
||||
endpoint: `http://${host}`,
|
||||
maxRetries: 0,
|
||||
|
@ -109,7 +109,7 @@ describe('GcpService request behavior', function testSuite() {
|
|||
});
|
||||
});
|
||||
|
||||
after('Terminating Server', () => {
|
||||
afterAll(() => {
|
||||
httpServer.close();
|
||||
});
|
||||
|
||||
|
@ -125,12 +125,12 @@ describe('GcpService request behavior', function testSuite() {
|
|||
});
|
||||
});
|
||||
|
||||
describe('GcpService pathStyle tests', function testSuite() {
|
||||
this.timeout(120000);
|
||||
describe('GcpService pathStyle tests', () => {
|
||||
jest.setTimeout(120000);
|
||||
let httpServer;
|
||||
let client;
|
||||
|
||||
before(done => {
|
||||
beforeAll(done => {
|
||||
client = new GCP({
|
||||
endpoint: `http://${host}`,
|
||||
maxRetries: 0,
|
||||
|
@ -147,7 +147,7 @@ describe('GcpService pathStyle tests', function testSuite() {
|
|||
});
|
||||
});
|
||||
|
||||
after('Terminating Server', () => {
|
||||
afterAll(() => {
|
||||
httpServer.close();
|
||||
});
|
||||
|
||||
|
@ -159,12 +159,12 @@ describe('GcpService pathStyle tests', function testSuite() {
|
|||
}));
|
||||
});
|
||||
|
||||
describe('GcpService dnsStyle tests', function testSuite() {
|
||||
this.timeout(120000);
|
||||
describe('GcpService dnsStyle tests', () => {
|
||||
jest.setTimeout(120000);
|
||||
let httpServer;
|
||||
let client;
|
||||
|
||||
before(done => {
|
||||
beforeAll(done => {
|
||||
client = new GCP({
|
||||
endpoint: `http://${host}`,
|
||||
maxRetries: 0,
|
||||
|
@ -181,7 +181,7 @@ describe('GcpService dnsStyle tests', function testSuite() {
|
|||
});
|
||||
});
|
||||
|
||||
after('Terminating Server', () => {
|
||||
afterAll(() => {
|
||||
httpServer.close();
|
||||
});
|
||||
|
||||
|
|
|
@@ -64,7 +64,7 @@ function getDataWrapper() {
 let dw;
 
 describe('Routes from DataWrapper to backend client', () => {
-    before(() => {
+    beforeAll(() => {
         dw = getDataWrapper();
     });
 
|
|||
|
||||
// mock a simple bucketclient to get a fake raft log
|
||||
class BucketClientMock {
|
||||
|
||||
getRaftLog(raftId, start, limit, targetLeader, reqUids, callback) {
|
||||
switch (raftId) {
|
||||
case 0:
|
||||
|
@ -117,11 +116,11 @@ describe('raft record log client', () => {
|
|||
done();
|
||||
}
|
||||
|
||||
before(done => {
|
||||
beforeAll(done => {
|
||||
setup(done);
|
||||
});
|
||||
|
||||
after(done => {
|
||||
afterAll(done => {
|
||||
done();
|
||||
});
|
||||
|
||||
|
|
|
@@ -66,7 +66,7 @@ describe('record log - persistent log of metadata operations', () => {
         done();
     }
 
-    before(done => {
+    beforeAll(done => {
         temp.mkdir('record-log-testdir-', (err, dbDir) => {
             const rootDb = level(dbDir);
             db = sublevel(rootDb);
 
@@ -74,7 +74,7 @@ describe('record log - persistent log of metadata operations', () => {
         });
     });
 
-    after(done => {
+    afterAll(done => {
         server.close();
         done();
     });
 
@@ -207,7 +207,7 @@ describe('record log - persistent log of metadata operations', () => {
     describe('readRecords', () => {
         let logProxy;
 
-        before(done => {
+        beforeAll(done => {
            logProxy = createScratchRecordLog(cliLogger, err => {
                 assert.ifError(err);
                 // fill the log with 1000 entries
 
@@ -228,7 +228,7 @@ describe('record log - persistent log of metadata operations', () => {
             });
         });
 
-        after(done => closeRecordLog(logProxy, () => {
+        afterAll(done => closeRecordLog(logProxy, () => {
            logProxy = undefined;
            done();
        }));
@@ -395,7 +395,7 @@ describe('MongoClientInterface::_processEntryData', () => {
     tests.forEach(([msg, isTransient, params, expected]) => it(msg, () => {
         assert.deepStrictEqual(
             mongoTestClient._processEntryData(params, isTransient),
-            expected
+            expected,
         );
     }));
 });
 
@@ -498,7 +498,7 @@ describe('MongoClientInterface::_isReplicationEntryStalled', () => {
     tests.forEach(([msg, params, expected]) => it(msg, () => {
         assert.deepStrictEqual(
             mongoTestClient._isReplicationEntryStalled(params, testDate),
-            expected
+            expected,
         );
     }));
 });
 
@@ -556,7 +556,7 @@ function uploadObjects(client, bucketName, objectList, callback) {
 describe('MongoClientInterface, tests', () => {
     const hr = 1000 * 60 * 60;
     let client;
-    before(done => {
+    beforeAll(done => {
         mongoserver.waitUntilRunning().then(() => {
             const opts = {
                 replicaSetHosts: 'localhost:27018',
 
@@ -573,7 +573,7 @@ describe('MongoClientInterface, tests', () => {
         });
     });
 
-    after(done => {
+    afterAll(done => {
         async.series([
             next => client.close(next),
             next => mongoserver.stop()
|
|||
done();
|
||||
});
|
||||
it(`Should distribute uniformly with a maximum of ${ERROR}% of deviation`,
|
||||
function f(done) {
|
||||
this.timeout(20000);
|
||||
(done) => {
|
||||
jest.setTimeout(20000);
|
||||
const strings = new Array(STRING_COUNT).fill('')
|
||||
.map(() => randomString(10));
|
||||
const arr = new Array(ARRAY_LENGTH).fill(0);
|
||||
|
|
|
@@ -177,7 +177,7 @@ class LoopbackServerChannel extends EchoChannel {
         serverExtensions.map(extension =>
             this.KMIP.TextString(
                 extension.name,
-                extension.value)
+                extension.value),
         )));
     }
     if (queryFunctions.includes('Query Extension Map')) {
@@ -50,7 +50,6 @@ class EchoChannel extends EventEmitter {
         this.clogged = true;
         return this;
     }
-
 }
 
 class MirrorChannel extends EchoChannel {
@@ -0,0 +1,16 @@
+{
+    "compilerOptions": {
+        "target": "es6",
+        "module": "commonjs",
+        "rootDir": "./",
+        "resolveJsonModule": true,
+        "allowJs": true,
+        "checkJs": false,
+        "outDir": "./build",
+        "esModuleInterop": true,
+        "forceConsistentCasingInFileNames": true,
+        "strict": true
+    },
+    "include": [ "./index.js", "./lib/*.js", "./lib/**/*.js" ],
+    "exclude": [ "./node_modules/*" ]
+}
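`allowJs: true` with `checkJs: false` lets the new `compile` script (`tsc`) pull the existing JavaScript sources through the compiler into `./build` without type-checking them, while any module later converted to TypeScript gets the full `strict` treatment — a common incremental-migration setup. For instance (hypothetical file, not part of this diff):

```ts
// lib/example.ts — once a module is renamed to .ts, "strict": true
// applies to it; neighbouring .js files still compile unchecked.
export function parseRetries(raw: string): number {
    const n = Number(raw);
    if (Number.isNaN(n) || n < 0) {
        throw new TypeError(`invalid retry count: ${raw}`);
    }
    return n;
}
```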