Compare commits

...

9 Commits

Author SHA1 Message Date
Jordi Bertran de Balanda 9b92a50ca7 CLDSRV-174 - add required locationStorageCheck 2022-05-05 21:32:52 +02:00
Jordi Bertran de Balanda a6230f5538 CLDSRV-174 - correct flaky time comparison
Locally, the time comparison test
'return PreconditionFailed if request header includes "if-unmodified-since"'
is flaky.

Intuitively, this is because the successive side effects on the master
date we use generate two dates (see the sketch after the commit list):
- if we are lucky, the scheduler and inherent time-computation
  inaccuracies introduce enough delay for one date to be strictly later
  than the other, so the test succeeds
- if we are not, the two dates are identical, so the test fails
2022-05-05 20:49:26 +02:00
Ronnie Smith 3bfcf624bf feature: CLDSRV-162 use data retrieval params over fn 2022-05-05 20:49:26 +02:00
Jordi Bertran de Balanda bd9209ef5e CLDSRV-174 - catch up linting debt 2022-05-05 20:49:26 +02:00
Jordi Bertran de Balanda 371cb689af CLDSRV-174 - update Arsenal and eslint 2022-05-05 20:49:26 +02:00
Xin LI 674860ef8a CLDSRV-174 - shorter error pattern 2022-05-05 20:49:19 +02:00
Xin LI ce28e08d3e CLDSRV-174 - use standard formated nonexist versionId 2022-05-05 20:49:12 +02:00
Xin LI 67df4fa207 CLDSRV-174 - error type migration 2022-05-05 17:59:44 +02:00
Xin LI 4100ac73b2 CLDSRV-174 - dockerfile 2022-05-05 17:59:27 +02:00
270 changed files with 12720 additions and 13618 deletions
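
To make the race described in commit a6230f5538 concrete, here is a minimal, hypothetical illustration of the failure mode; it is not the actual CloudServer test code:

// Hypothetical illustration only, not the CloudServer test.
const assert = require('assert');

const lastModified = new Date();        // first date derived from "now"
const ifUnmodifiedSince = new Date();   // second date derived from "now"

// The test needs the second date to be strictly later than the first.
// Both Date objects have millisecond resolution, so if the scheduler does
// not let at least 1 ms elapse between the two lines, they compare equal
// and the precondition check built on this comparison behaves differently.
assert(ifUnmodifiedSince.getTime() > lastModified.getTime());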

View File

@@ -1 +1,6 @@
-{ "extends": "scality" }
+{
+    "extends": "scality",
+    "parserOptions": {
+        "ecmaVersion": 2020
+    }
+}
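
For context, the ecmaVersion bump is what lets ESLint parse the newer syntax used throughout this changeset; an illustrative snippet (not taken from any single file in the diff):

// Illustrative only: both constructs appear in this changeset and require
// a parser configured for ES2020 (optional chaining) or later.
if (err?.is.NoSuchKey) {        // optional chaining, ES2020
    return next();
}
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
    'bad environment variable: S3BACKEND environment variable ' +
    'should be one of mem/file/scality/cdmi',   // trailing comma in a call, ES2017+
);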

View File

@@ -924,7 +924,7 @@ class Config extends EventEmitter {
 `bad config: utapi.filter.${state}.${field} must be an array of strings`);
 utapiResourceFilters[field] = { [state]: new Set(resources) };
 }
-}
+},
 ));
 this.utapi.filter = utapiResourceFilters;
 }
@@ -1088,7 +1088,7 @@ class Config extends EventEmitter {
 typeof config.certFilePaths.cert === 'string' && ((
 config.certFilePaths.ca &&
 typeof config.certFilePaths.ca === 'string') ||
-!config.certFilePaths.ca)
+!config.certFilePaths.ca),
 );
 }
 const { key, cert, ca } = config.certFilePaths ?
@@ -1174,7 +1174,7 @@ class Config extends EventEmitter {
 const validBackends = ['mem', 'file', 'scality', 'cdmi'];
 assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
 'bad environment variable: S3BACKEND environment variable ' +
-'should be one of mem/file/scality/cdmi'
+'should be one of mem/file/scality/cdmi',
 );
 auth = process.env.S3BACKEND;
 data = process.env.S3BACKEND;
@@ -1209,7 +1209,7 @@ class Config extends EventEmitter {
 const validData = ['mem', 'file', 'scality', 'multiple'];
 assert(validData.indexOf(process.env.S3DATA) > -1,
 'bad environment variable: S3DATA environment variable ' +
-'should be one of mem/file/scality/multiple'
+'should be one of mem/file/scality/multiple',
 );
 data = process.env.S3DATA;
 }
@@ -1218,7 +1218,7 @@ class Config extends EventEmitter {
 }
 assert(this.locationConstraints !== undefined &&
 this.restEndpoints !== undefined,
-'bad config: locationConstraints and restEndpoints must be set'
+'bad config: locationConstraints and restEndpoints must be set',
 );
 if (process.env.S3METADATA) {

View File

@@ -49,7 +49,7 @@ function updateRequestContexts(request, requestContexts, apiMethod, log, cb) {
 return metadata.getObjectMD(bucketName, objectKey, { versionId: reqVersionId }, log,
 (err, objMD) => {
 if (err) {
-if (err.NoSuchKey) {
+if (err.is.NoSuchKey) {
 return next();
 }
 log.trace('error getting request object tags');
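
The same error-handling migration recurs in most of the files below: instead of reading a boolean flag off the error (err.NoSuchKey) or comparing the error to the arsenal errors singleton (err === errors.NoSuchKey), the code now queries the error through its is accessor, often combined with optional chaining. A before/after sketch of the pattern as it appears in this diff:

// Before: flag on the error object, or identity comparison with the
// arsenal errors singleton.
if (err && err.NoSuchKey) { /* ... */ }
if (err === errors.NoSuchKey) { /* ... */ }

// After: ask the error itself; `err?.is.X` also covers an undefined err.
if (err?.is.NoSuchKey) { /* ... */ }
if (err && !err.is.BucketAlreadyExists) { /* ... */ }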

View File

@@ -22,7 +22,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
 // Get new format usersBucket to see if it exists
 return metadata.getBucket(usersBucket, log, (err, usersBucketAttrs) => {
-if (err && !err.NoSuchBucket && !err.BucketAlreadyExists) {
+if (err && !err.is.NoSuchBucket && !err.is.BucketAlreadyExists) {
 return cb(err);
 }
 const splitter = usersBucketAttrs ?
@@ -36,7 +36,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
 usersBucket : oldUsersBucket;
 return metadata.putObjectMD(usersBucketBeingCalled, key,
 omVal, {}, log, err => {
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
 // There must be no usersBucket so createBucket
 // one using the new format
 log.trace('users bucket does not exist, ' +
@@ -57,8 +57,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
 // error with respect
 // to the usersBucket.
 if (err &&
-err !==
-errors.BucketAlreadyExists) {
+!err.is.BucketAlreadyExists) {
 log.error('error from metadata', {
 error: err,
 });
@@ -206,7 +205,7 @@ function createBucket(authInfo, bucketName, headers,
 },
 getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) {
 metadata.getBucket(bucketName, log, (err, data) => {
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
 return callback(null, 'NoBucketYet');
 }
 if (err) {

View File

@@ -16,7 +16,7 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
 `${mpuBucketPrefix}${destinationBucketName}`;
 return metadata.deleteBucket(mpuBucketName, log, err => {
 // If the mpu bucket does not exist, just move on
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
 return cb();
 }
 return cb(err);
@@ -90,7 +90,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
 log, (err, objectsListRes) => {
 // If no shadow bucket ever created, no ongoing MPU's, so
 // continue with deletion
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
 return next();
 }
 if (err) {

View File

@@ -11,16 +11,16 @@ function deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
 metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
 // If the object representing the bucket is not in the
 // users bucket just continue
-if (error && error.NoSuchKey) {
+if (error?.is.NoSuchKey) {
 return cb(null);
 // BACKWARDS COMPATIBILITY: Remove this once no longer
 // have old user bucket format
-} else if (error && error.NoSuchBucket) {
+} else if (error?.is.NoSuchBucket) {
 const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
 oldSplitter, bucketName);
 return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
 {}, log, error => {
-if (error && !error.NoSuchKey) {
+if (error && !error.is.NoSuchKey) {
 log.error('from metadata while deleting user bucket',
 { error });
 return cb(error);

View File

@@ -100,7 +100,7 @@ function findCorsRule(rules, origin, method, headers) {
 * @return {object} resHeaders - headers to include in response
 */
 function generateCorsResHeaders(rule, origin, method, headers,
-isPreflight) {
+isPreflight) {
 const resHeaders = {
 'access-control-max-age': rule.maxAgeSeconds,
 'access-control-allow-methods': rule.allowedMethods.join(', '),

View File

@@ -239,7 +239,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
 if (err) {
 // TODO: check AWS error when user requested a specific
 // version before any versions have been put
-const logLvl = err === errors.BadRequest ?
+const logLvl = err.is.BadRequest ?
 'debug' : 'error';
 log[logLvl]('error getting versioning info', {
 error: err,

View File

@@ -80,7 +80,7 @@ function _generateExpHeadresMPU(rules, params, datetime) {
 const date = calculateDate(
 params.date,
 rule.DaysAfterInitiation,
-datetime
+datetime,
 );
 return {

View File

@@ -0,0 +1,48 @@
+const { errors } = require('arsenal');
+
+const { config } = require('../../../Config');
+const { getLocationMetric, pushLocationMetric } =
+    require('../../../utapi/utilities');
+
+function _gbToBytes(gb) {
+    return gb * 1024 * 1024 * 1024;
+}
+
+/**
+ * locationStorageCheck - will ensure there is enough space left for object on
+ * PUT operations, or will update metric on DELETE
+ * NOTE: storage limit may not be exactly enforced in the case of concurrent
+ * requests when near limit
+ * @param {string} location - name of location to check quota
+ * @param {number} updateSize - new size to check against quota in bytes
+ * @param {object} log - werelogs logger
+ * @param {function} cb - callback function
+ * @return {undefined}
+ */
+function locationStorageCheck(location, updateSize, log, cb) {
+    const lc = config.locationConstraints;
+    const sizeLimitGB = lc[location] ? lc[location].sizeLimitGB : undefined;
+    if (updateSize === 0 || sizeLimitGB === undefined || sizeLimitGB === null) {
+        return cb();
+    }
+    // no need to list location metric, since it should be decreased
+    if (updateSize < 0) {
+        return pushLocationMetric(location, updateSize, log, cb);
+    }
+    return getLocationMetric(location, log, (err, bytesStored) => {
+        if (err) {
+            log.error(`Error listing metrics from Utapi: ${err.message}`);
+            return cb(err);
+        }
+        const newStorageSize = parseInt(bytesStored, 10) + updateSize;
+        const sizeLimitBytes = _gbToBytes(sizeLimitGB);
+        if (sizeLimitBytes < newStorageSize) {
+            return cb(errors.AccessDenied.customizeDescription(
+                `The assigned storage space limit for location ${location} ` +
+                'will be exceeded'));
+        }
+        return pushLocationMetric(location, updateSize, log, cb);
+    });
+}
+
+module.exports = locationStorageCheck;
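
A minimal usage sketch (hypothetical call sites, not part of this changeset): a PUT passes the incoming object size as a positive delta so the quota is checked before writing, while a DELETE passes a negative delta so only the Utapi location metric is decremented.

// Hypothetical call sites, for illustration only.
const locationStorageCheck =
    require('./lib/api/apiUtils/object/locationStorageCheck');

// PUT path: refuse the write if the location's sizeLimitGB would be exceeded.
locationStorageCheck('us-east-1', objectSizeBytes, log, err => {
    if (err) {
        // err is AccessDenied: "The assigned storage space limit for
        // location us-east-1 will be exceeded"
        return callback(err);
    }
    return writeObject(callback);
});

// DELETE path: a negative delta skips the quota check and only updates
// the location metric.
locationStorageCheck('us-east-1', -deletedSizeBytes, log, callback);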

View File

@@ -43,7 +43,7 @@ function validateHeaders(bucket, headers, log) {
 !(objectLockMode && objectLockDate)) {
 return errors.InvalidArgument.customizeDescription(
 'x-amz-object-lock-retain-until-date and ' +
-'x-amz-object-lock-mode must both be supplied'
+'x-amz-object-lock-mode must both be supplied',
 );
 }
 const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);

View File

@@ -112,7 +112,7 @@ function generateMpuPartStorageInfo(filteredPartList) {
 * and extraPartLocations
 */
 function validateAndFilterMpuParts(storedParts, jsonList, mpuOverviewKey,
-splitter, log) {
+splitter, log) {
 let storedPartsCopy = [];
 const filteredPartsObj = {};
 filteredPartsObj.partList = [];

View File

@@ -2,7 +2,7 @@ const { errors } = require('arsenal');
 const {
 parseRangeSpec,
 parseRange,
-} = require('arsenal/lib/network/http/utils');
+} = require('arsenal').network.http.utils;
 const constants = require('../../../../constants');
 const setPartRanges = require('./setPartRanges');
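
The same import change shows up in several files of this changeset: helpers that used to be required from Arsenal's internal file layout are now taken from its exported namespace, so CloudServer no longer depends on arsenal's directory structure. Before/after, as it appears in the diff:

// Before: requiring a file inside arsenal's source tree.
const { parseRange } = require('arsenal/lib/network/http/utils');

// After: going through arsenal's exported namespace.
const { parseRange } = require('arsenal').network.http.utils;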

View File

@@ -292,7 +292,7 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
 // it's possible there was a concurrent request to
 // delete the null version, so proceed with putting a
 // new version
-if (err === errors.NoSuchKey) {
+if (err.is.NoSuchKey) {
 return next(null, options);
 }
 return next(errors.InternalError);

View File

@@ -113,7 +113,7 @@ function processVersions(bucketName, listParams, list, encType) {
 xml.push(
 '<?xml version="1.0" encoding="UTF-8"?>',
 '<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
-'<Name>', bucketName, '</Name>'
+'<Name>', bucketName, '</Name>',
 );
 const isTruncated = list.IsTruncated ? 'true' : 'false';
 const xmlParams = [
@@ -160,7 +160,7 @@ function processVersions(bucketName, listParams, list, encType) {
 `<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
 '</Owner>',
 `<StorageClass>${v.StorageClass}</StorageClass>`,
-v.IsDeleteMarker ? '</DeleteMarker>' : '</Version>'
+v.IsDeleteMarker ? '</DeleteMarker>' : '</Version>',
 );
 });
 list.CommonPrefixes.forEach(item => {
@@ -176,7 +176,7 @@ function processMasterVersions(bucketName, listParams, list) {
 xml.push(
 '<?xml version="1.0" encoding="UTF-8"?>',
 '<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
-'<Name>', bucketName, '</Name>'
+'<Name>', bucketName, '</Name>',
 );
 const isTruncated = list.IsTruncated ? 'true' : 'false';
 const xmlParams = [
@@ -234,19 +234,19 @@ function processMasterVersions(bucketName, listParams, list) {
 `<Key>${objectKey}</Key>`,
 `<LastModified>${v.LastModified}</LastModified>`,
 `<ETag>&quot;${v.ETag}&quot;</ETag>`,
-`<Size>${v.Size}</Size>`
+`<Size>${v.Size}</Size>`,
 );
 if (!listParams.v2 || listParams.fetchOwner) {
 xml.push(
 '<Owner>',
 `<ID>${v.Owner.ID}</ID>`,
 `<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
-'</Owner>'
+'</Owner>',
 );
 }
 return xml.push(
 `<StorageClass>${v.StorageClass}</StorageClass>`,
-'</Contents>'
+'</Contents>',
 );
 });
 list.CommonPrefixes.forEach(item => {

View File

@@ -67,7 +67,7 @@ function bucketGetEncryption(authInfo, request, log, callback) {
 '</ApplyServerSideEncryptionByDefault>',
 '<BucketKeyEnabled>false</BucketKeyEnabled>',
 '</Rule>',
-'</ServerSideEncryptionConfiguration>'
+'</ServerSideEncryptionConfiguration>',
 );
 pushMetric('getBucketEncryption', log, {

View File

@@ -20,7 +20,7 @@ function convertToXml(versioningConfiguration) {
 xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
 '<VersioningConfiguration ' +
-'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
+'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
 );
 if (versioningConfiguration && versioningConfiguration.Status) {

View File

@@ -50,7 +50,7 @@ const REPLICATION_ACTION = 'MPU';
 */
 /*
 Format of xml response:
 <?xml version='1.0' encoding='UTF-8'?>
 <CompleteMultipartUploadResult
@@ -341,7 +341,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
 if (err) {
 // TODO: check AWS error when user requested a specific
 // version before any versions have been put
-const logLvl = err === errors.BadRequest ?
+const logLvl = err.is.BadRequest ?
 'debug' : 'error';
 log[logLvl]('error getting versioning info', {
 error: err,

View File

@@ -326,7 +326,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
 return next(error, corsHeaders);
 }
 return next(null, corsHeaders, destinationBucket, objectSSEConfig);
-}
+},
 ),
 ],
 (error, corsHeaders, destinationBucket, objectSSEConfig) => {
@@ -334,7 +334,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
 return callback(error, null, corsHeaders);
 }
 return _storetheMPObject(destinationBucket, corsHeaders, objectSSEConfig);
-}
+},
 );
 return undefined;
 }

View File

@@ -245,7 +245,7 @@ function listParts(authInfo, request, log, callback) {
 xml.push(
 '<?xml version="1.0" encoding="UTF-8"?>',
 '<ListPartsResult xmlns="http://s3.amazonaws.com/doc/' +
-'2006-03-01/">'
+'2006-03-01/">',
 );
 buildXML([
 { tag: 'Bucket', value: bucketName },

View File

@@ -40,7 +40,7 @@ const versionIdUtils = versioning.VersionID;
 */
 /*
 Format of xml response:
 <?xml version="1.0" encoding="UTF-8"?>
 <DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
@@ -84,7 +84,7 @@ function _formatXML(quietSetting, errorResults, deleted) {
 '<Message>',
 escapeForXml(errorObj.error.description),
 '</Message>',
-'</Error>'
+'</Error>',
 );
 });
 errorXML = errorXML.join('');
@@ -110,13 +110,13 @@ function _formatXML(quietSetting, errorResults, deleted) {
 '<Deleted>',
 '<Key>',
 escapeForXml(version.entry.key),
-'</Key>'
+'</Key>',
 );
 if (version.entry.versionId) {
 deletedXML.push(
 '<VersionId>',
 escapeForXml(version.entry.versionId),
-'</VersionId>'
+'</VersionId>',
 );
 }
 if (isDeleteMarker) {
@@ -126,7 +126,7 @@ function _formatXML(quietSetting, errorResults, deleted) {
 '</DeleteMarker>',
 '<DeleteMarkerVersionId>',
 deleteMarkerVersionId,
-'</DeleteMarkerVersionId>'
+'</DeleteMarkerVersionId>',
 );
 }
 deletedXML.push('</Deleted>');
@@ -210,10 +210,10 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
 (versionId, callback) => metadataGetObject(bucketName, entry.key,
 versionId, log, (err, objMD) => {
 // if general error from metadata return error
-if (err && !err.NoSuchKey) {
+if (err && !err.is.NoSuchKey) {
 return callback(err);
 }
-if (err && err.NoSuchKey) {
+if (err?.is.NoSuchKey) {
 const verCfg = bucket.getVersioningConfiguration();
 // To adhere to AWS behavior, create a delete marker
 // if trying to delete an object that does not exist
@@ -386,7 +386,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
 return vault.checkPolicies(requestContextParams, authInfo.getArn(),
 log, (err, authorizationResults) => {
 // there were no policies so received a blanket AccessDenied
-if (err && err.AccessDenied) {
+if (err?.is.AccessDenied) {
 objects.forEach(entry => {
 errorResults.push({
 entry,

View File

@@ -1,5 +1,3 @@
-const { errors } = require('arsenal');
 const abortMultipartUpload = require('./apiUtils/object/abortMultipartUpload');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const isLegacyAWSBehavior = require('../utilities/legacyAWSBehavior');
@@ -29,10 +27,10 @@ function multipartDelete(authInfo, request, log, callback) {
 request.method, destinationBucket);
 const location = destinationBucket ?
 destinationBucket.getLocationConstraint() : null;
-if (err && err !== errors.NoSuchUpload) {
+if (err && !err.is.NoSuchUpload) {
 return callback(err, corsHeaders);
 }
-if (err === errors.NoSuchUpload && isLegacyAWSBehavior(location)) {
+if (err?.is.NoSuchUpload && isLegacyAWSBehavior(location)) {
 log.trace('did not find valid mpu with uploadId', {
 method: 'multipartDelete',
 uploadId,

View File

@@ -1,5 +1,5 @@
 const { errors, s3middleware } = require('arsenal');
-const { parseRange } = require('arsenal/lib/network/http/utils');
+const { parseRange } = require('arsenal').network.http.utils;
 const data = require('../data/wrapper');

View File

@@ -1,6 +1,6 @@
 const { errors, s3middleware } = require('arsenal');
 const validateHeaders = s3middleware.validateConditionalHeaders;
-const { parseRange } = require('arsenal/lib/network/http/utils');
+const { parseRange } = require('arsenal').network.http.utils;
 const { decodeVersionId } = require('./apiUtils/object/versioning');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');

View File

@@ -101,7 +101,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
 return next(invalidSSEError);
 }
 return next(null, sseConfig);
-}
+},
 );
 },
 function createCipherBundle(serverSideEncryptionConfig, next) {

View File

@@ -182,7 +182,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
 sourceLocationConstraintName, next) {
 return metadata.getBucket(mpuBucketName, log,
 (err, mpuBucket) => {
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
 return next(errors.NoSuchUpload);
 }
 if (err) {
@@ -211,7 +211,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
 return metadata.getObjectMD(mpuBucketName, mpuOverviewKey,
 null, log, (err, res) => {
 if (err) {
-if (err.NoSuchKey) {
+if (err.is.NoSuchKey) {
 return next(errors.NoSuchUpload);
 }
 log.error('error getting overview object from ' +
@@ -263,7 +263,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
 metadata.getObjectMD(mpuBucketName, partKey, {}, log,
 (err, result) => {
 // If there is nothing being overwritten just move on
-if (err && !err.NoSuchKey) {
+if (err && !err.is.NoSuchKey) {
 log.debug('error getting current part (if any)',
 { error: err });
 return next(err);

View File

@@ -69,7 +69,7 @@ function objectPutLegalHold(authInfo, request, log, callback) {
 log.trace('object lock not enabled on bucket',
 { method: 'objectPutLegalHold' });
 return next(errors.InvalidRequest.customizeDescription(
-'Bucket is missing Object Lock Configuration'
+'Bucket is missing Object Lock Configuration',
 ), bucket);
 }
 return next(null, bucket, objectMD);

View File

@@ -94,7 +94,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
 // Get the destination bucket.
 next => metadata.getBucket(bucketName, log,
 (err, destinationBucket) => {
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
 return next(errors.NoSuchBucket, destinationBucket);
 }
 if (err) {
@@ -142,7 +142,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
 (destinationBucket, cipherBundle, next) =>
 metadata.getBucket(mpuBucketName, log,
 (err, mpuBucket) => {
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
 return next(errors.NoSuchUpload, destinationBucket);
 }
 if (err) {
@@ -252,7 +252,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
 return metadata.getObjectMD(mpuBucketName, partKey, {}, log,
 (err, res) => {
 // If there is no object with the same key, continue.
-if (err && !err.NoSuchKey) {
+if (err && !err.is.NoSuchKey) {
 log.error('error getting current part (if any)', {
 error: err,
 method: 'objectPutPart::metadata.getObjectMD',

View File

@@ -72,7 +72,7 @@ function objectPutRetention(authInfo, request, log, callback) {
 log.trace('object lock not enabled on bucket',
 { method: 'objectPutRetention' });
 return next(errors.InvalidRequest.customizeDescription(
-'Bucket is missing Object Lock Configuration'
+'Bucket is missing Object Lock Configuration',
 ), bucket);
 }
 return next(null, bucket, objectMD);

View File

@@ -35,7 +35,7 @@ function generateXml(xml, owner, userBuckets, splitter) {
 `<Name>${key}</Name>`,
 `<CreationDate>${bucket.value.creationDate}` +
 '</CreationDate>',
-'</Bucket>'
+'</Bucket>',
 );
 });
 xml.push('</Buckets></ListAllMyBucketsResult>');
@@ -68,7 +68,7 @@ function serviceGet(authInfo, request, log, callback) {
 `<DisplayName>${authInfo.getAccountDisplayName()}` +
 '</DisplayName>',
 '</Owner>',
-'<Buckets>'
+'<Buckets>',
 );
 return services.getService(authInfo, request, log, constants.splitter,
 (err, userBuckets, splitter) => {

View File

@@ -147,7 +147,7 @@ function websiteGet(request, log, callback) {
 'bucketGet', constants.publicId, null, log, request);
 // if index object does not exist and bucket is private AWS
 // returns 403 - AccessDenied error.
-if (err === errors.NoSuchKey && !bucketAuthorized) {
+if (err.is.NoSuchKey && !bucketAuthorized) {
 returnErr = errors.AccessDenied;
 }
 return _errorActions(returnErr,

View File

@@ -107,7 +107,7 @@ function websiteHead(request, log, callback) {
 'bucketGet', constants.publicId, null, log, request);
 // if index object does not exist and bucket is private AWS
 // returns 403 - AccessDenied error.
-if (err === errors.NoSuchKey && !bucketAuthorized) {
+if (err.is.NoSuchKey && !bucketAuthorized) {
 returnErr = errors.AccessDenied;
 }
 return _errorActions(returnErr, routingRules,

View File

@@ -283,7 +283,7 @@ class V4Transform extends Transform {
 }
 // get next chunk
 return callback();
-}
+},
 );
 }
 }

View File

@@ -9,7 +9,7 @@ const { config } = require('../../Config');
 const missingVerIdInternalError = errors.InternalError.customizeDescription(
 'Invalid state. Please ensure versioning is enabled ' +
-'in AWS for the location constraint and try again.'
+'in AWS for the location constraint and try again.',
 );
 class AwsClient {
@@ -42,7 +42,7 @@ class AwsClient {
 err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 if (!data.VersionId) {
@@ -107,13 +107,13 @@ class AwsClient {
 const error = errors.ServiceUnavailable
 .customizeDescription(
 'Unexpected error from AWS: "NotFound". Data on AWS ' +
-'may have been altered outside of CloudServer.'
+'may have been altered outside of CloudServer.',
 );
 return callback(error);
 }
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 return callback();
@@ -160,7 +160,7 @@ class AwsClient {
 }
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 return callback();
@@ -231,7 +231,7 @@ class AwsClient {
 err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 return callback(null, mpuResObj);
@@ -259,7 +259,7 @@ class AwsClient {
 'on uploadPart', err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 // Because we manually add quotes to ETag later, remove quotes here
@@ -287,7 +287,7 @@ class AwsClient {
 err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 // build storedParts object to mimic Scality S3 backend returns
@@ -359,7 +359,7 @@ class AwsClient {
 'completeMPU', err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 if (!completeMpuRes.VersionId) {
@@ -377,7 +377,7 @@ class AwsClient {
 'headObject', err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 // remove quotes from eTag because they're added later
@@ -403,7 +403,7 @@ class AwsClient {
 'using the same uploadId.', err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 return callback();
@@ -431,7 +431,7 @@ class AwsClient {
 'putObjectTagging', err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 return callback();
@@ -453,7 +453,7 @@ class AwsClient {
 'deleteObjectTagging', err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 return callback();
@@ -490,14 +490,14 @@ class AwsClient {
 this._dataStoreName);
 return callback(errors.AccessDenied
 .customizeDescription('Error: Unable to access ' +
-`${sourceAwsBucketName} AWS bucket`)
+`${sourceAwsBucketName} AWS bucket`),
 );
 }
 logHelper(log, 'error', 'error from data backend on ' +
 'copyObject', err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 if (!copyResult.VersionId) {
@@ -539,14 +539,14 @@ class AwsClient {
 this._dataStoreName);
 return callback(errors.AccessDenied
 .customizeDescription('Error: Unable to access ' +
-`${sourceAwsBucketName} AWS bucket`)
+`${sourceAwsBucketName} AWS bucket`),
 );
 }
 logHelper(log, 'error', 'error from data backend on ' +
 'uploadPartCopy', err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 const eTag = removeQuotes(res.CopyPartResult.ETag);

View File

@@ -396,14 +396,14 @@ class AzureClient {
 this._dataStoreName);
 return callback(errors.AccessDenied
 .customizeDescription('Error: Unable to access ' +
-`${sourceContainerName} Azure Container`)
+`${sourceContainerName} Azure Container`),
 );
 }
 logHelper(log, 'error', 'error from data backend on ' +
 'copyObject', err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
 );
 }
 if (res.copy.status === 'pending') {
@@ -417,12 +417,12 @@
 'on abortCopyBlob', err, this._dataStoreName);
 return callback(errors.ServiceUnavailable
 .customizeDescription('Error returned from ' +
-`AWS on abortCopyBlob: ${err.message}`)
+`AWS on abortCopyBlob: ${err.message}`),
 );
 }
 return callback(errors.InvalidObjectState
 .customizeDescription('Error: Azure copy status was ' +
-'pending. It has been aborted successfully')
+'pending. It has been aborted successfully'),
 );
 });
 }

View File

@@ -1,26 +1,18 @@
-const async = require('async');
-const { errors, s3middleware } = require('arsenal');
-const PassThrough = require('stream').PassThrough;
-const DataFileInterface = require('./file/backend');
-const inMemory = require('./in_memory/backend').backend;
-const locationConstraintCheck =
-require('../api/apiUtils/object/locationConstraintCheck');
-const multipleBackendGateway = require('./multipleBackendGateway');
-const utils = require('./external/utils');
+const { storage } = require('arsenal');
 const { config } = require('../Config');
-const MD5Sum = s3middleware.MD5Sum;
-const NullStream = s3middleware.NullStream;
-const assert = require('assert');
 const kms = require('../kms/wrapper');
-const externalBackends = require('../../constants').externalBackends;
-const constants = require('../../constants');
-const { BackendInfo } = require('../api/apiUtils/object/BackendInfo');
-const RelayMD5Sum = require('../utilities/RelayMD5Sum');
-const skipError = new Error('skip');
+const metadata = require('../metadata/wrapper');
+const vault = require('../auth/vault');
+const locationStorageCheck =
+require('../api/apiUtils/object/locationStorageCheck');
+const { DataWrapper, MultipleBackendGateway, parseLC } = storage.data;
+const { DataFileInterface } = storage.data.file;
+const inMemory = storage.data.inMemory.datastore.backend;
 let CdmiData;
 try {
+// eslint-disable-next-line import/no-unresolved
 CdmiData = require('cdmiclient').CdmiData;
 } catch (err) {
 CdmiData = null;
@@ -33,10 +25,12 @@ if (config.backends.data === 'mem') {
 client = inMemory;
 implName = 'mem';
 } else if (config.backends.data === 'file') {
-client = new DataFileInterface();
+client = new DataFileInterface(config);
 implName = 'file';
 } else if (config.backends.data === 'multiple') {
-client = multipleBackendGateway;
+const clients = parseLC(config, vault);
+client = new MultipleBackendGateway(
+clients, metadata, locationStorageCheck);
 implName = 'multipleBackends';
 } else if (config.backends.data === 'cdmi') {
 if (!CdmiData) {
@@ -52,780 +46,16 @@ if (config.backends.data === 'mem') {
 implName = 'cdmi';
 }
+const data = new DataWrapper(
+client, implName, config, kms, metadata, locationStorageCheck, vault);
+
+config.on('location-constraints-update', () => {
+if (implName === 'multipleBackends') {
+const clients = parseLC(config, vault);
+client = new MultipleBackendGateway(
+clients, metadata, locationStorageCheck);
+data.switch(client);
+}
+});
+
+module.exports = { data, client, implName };
/**
 * _retryDelete - Attempt to delete key again if it failed previously
* @param { string | object } objectGetInfo - either string location of object
* to delete or object containing info of object to delete
* @param {object} log - Werelogs request logger
* @param {number} count - keeps count of number of times function has been run
* @param {function} cb - callback
* @returns undefined and calls callback
*/
const MAX_RETRY = 2;
// This check is done because on a put, complete mpu or copy request to
// Azure/AWS, if the object already exists on that backend, the existing object
// should not be deleted, which is the functionality for all other backends
function _shouldSkipDelete(locations, requestMethod, newObjDataStoreName) {
const skipMethods = { PUT: true, POST: true };
if (!Array.isArray(locations) || !locations[0] ||
!locations[0].dataStoreType) {
return false;
}
const isSkipBackend = externalBackends[locations[0].dataStoreType];
const isMatchingBackends =
locations[0].dataStoreName === newObjDataStoreName;
const isSkipMethod = skipMethods[requestMethod];
return (isSkipBackend && isMatchingBackends && isSkipMethod);
}
function _retryDelete(objectGetInfo, log, count, cb) {
if (count > MAX_RETRY) {
return cb(errors.InternalError);
}
return client.delete(objectGetInfo, log.getSerializedUids(), err => {
if (err) {
if (err.ObjNotFound) {
log.info('no such key in datastore',
{ objectGetInfo, implName, moreRetries: 'no' });
return cb(err);
}
log.error('delete error from datastore',
{ error: err, implName, moreRetries: 'yes' });
return _retryDelete(objectGetInfo, log, count + 1, cb);
}
return cb();
});
}
function _put(cipherBundle, value, valueSize,
keyContext, backendInfo, log, cb) {
assert.strictEqual(typeof valueSize, 'number');
log.debug('sending put to datastore', { implName, keyContext,
method: 'put' });
let hashedStream = null;
if (value) {
hashedStream = new MD5Sum();
value.pipe(hashedStream);
value.once('clientError', () => {
log.trace('destroying hashed stream');
hashedStream.destroy();
});
}
if (implName === 'multipleBackends') {
// Need to send backendInfo to client.put and
// client.put will provide dataRetrievalInfo so no
// need to construct here
/* eslint-disable no-param-reassign */
keyContext.cipherBundle = cipherBundle;
return client.put(hashedStream,
valueSize, keyContext, backendInfo, log.getSerializedUids(),
(err, dataRetrievalInfo) => {
if (err) {
log.error('put error from datastore',
{ error: err, implName });
if (err.httpCode === 408) {
return cb(errors.IncompleteBody);
}
return cb(errors.ServiceUnavailable);
}
return cb(null, dataRetrievalInfo, hashedStream);
});
}
/* eslint-enable no-param-reassign */
let writeStream = hashedStream;
if (cipherBundle && cipherBundle.cipher) {
writeStream = cipherBundle.cipher;
hashedStream.pipe(writeStream);
}
return client.put(writeStream, valueSize, keyContext,
log.getSerializedUids(), (err, key) => {
if (err) {
log.error('put error from datastore',
{ error: err, implName });
if (err.httpCode === 408) {
return cb(errors.IncompleteBody);
}
return cb(errors.InternalError);
}
const dataRetrievalInfo = {
key,
dataStoreName: implName,
};
return cb(null, dataRetrievalInfo, hashedStream);
});
}
const data = {
put: (cipherBundle, value, valueSize, keyContext, backendInfo, log, cb) => {
_put(cipherBundle, value, valueSize, keyContext, backendInfo, log,
(err, dataRetrievalInfo, hashedStream) => {
if (err) {
return cb(err);
}
if (hashedStream) {
if (hashedStream.completedHash) {
return cb(null, dataRetrievalInfo, hashedStream);
}
hashedStream.on('hashed', () => {
hashedStream.removeAllListeners('hashed');
return cb(null, dataRetrievalInfo, hashedStream);
});
return undefined;
}
return cb(null, dataRetrievalInfo);
});
},
head: (objectGetInfo, log, cb) => {
if (implName !== 'multipleBackends') {
// no-op if not multipleBackend implementation;
// head is used during get just to check external backend data state
return process.nextTick(cb);
}
return client.head(objectGetInfo, log.getSerializedUids(), cb);
},
get: (objectGetInfo, response, log, cb) => {
const isMdModelVersion2 = typeof(objectGetInfo) === 'string';
const isRequiredStringKey = constants.clientsRequireStringKey[implName];
const key = isMdModelVersion2 ? objectGetInfo : objectGetInfo.key;
const clientGetInfo = isRequiredStringKey ? key : objectGetInfo;
const range = objectGetInfo.range;
// If the key is explicitly set to null, the part to
// be read doesn't really exist and is only made of zeroes.
// This functionality is used by Scality-NFSD.
// Otherwise, the key is always defined
assert(key === null || key !== undefined);
if (key === null) {
cb(null, new NullStream(objectGetInfo.size, range));
return;
}
log.debug('sending get to datastore', { implName,
key, range, method: 'get' });
// We need to use response as a writable stream for AZURE GET
if (!isMdModelVersion2 && !isRequiredStringKey && response) {
clientGetInfo.response = response;
}
client.get(clientGetInfo, range, log.getSerializedUids(),
(err, stream) => {
if (err) {
log.error('get error from datastore',
{ error: err, implName });
return cb(errors.ServiceUnavailable);
}
if (objectGetInfo.cipheredDataKey) {
const serverSideEncryption = {
cryptoScheme: objectGetInfo.cryptoScheme,
masterKeyId: objectGetInfo.masterKeyId,
cipheredDataKey: Buffer.from(
objectGetInfo.cipheredDataKey, 'base64'),
};
const offset = objectGetInfo.range ?
objectGetInfo.range[0] : 0;
return kms.createDecipherBundle(
serverSideEncryption, offset, log,
(err, decipherBundle) => {
if (err) {
log.error('cannot get decipher bundle ' +
'from kms', {
method: 'data.wrapper.data.get',
});
return cb(err);
}
stream.pipe(decipherBundle.decipher);
return cb(null, decipherBundle.decipher);
});
}
return cb(null, stream);
});
},
delete: (objectGetInfo, log, cb) => {
const callback = cb || log.end;
const isMdModelVersion2 = typeof(objectGetInfo) === 'string';
const isRequiredStringKey = constants.clientsRequireStringKey[implName];
const key = isMdModelVersion2 ? objectGetInfo : objectGetInfo.key;
const clientGetInfo = isRequiredStringKey ? key : objectGetInfo;
log.trace('sending delete to datastore', {
implName, key, method: 'delete' });
// If the key is explicitly set to null, the part to
// be deleted doesn't really exist.
// This functionality is used by Scality-NFSD.
// Otherwise, the key is always defined
assert(key === null || key !== undefined);
if (key === null) {
callback(null);
return;
}
_retryDelete(clientGetInfo, log, 0, err => {
if (err && !err.ObjNotFound) {
log.error('delete error from datastore',
{ error: err, key: objectGetInfo.key, moreRetries: 'no' });
}
return callback(err);
});
},
batchDelete: (locations, requestMethod, newObjDataStoreName, log, cb) => {
// TODO: The method of persistence of sproxy delete key will
// be finalized; refer Issue #312 for the discussion. In the
// meantime, we at least log the location of the data we are
// about to delete before attempting its deletion.
if (_shouldSkipDelete(locations, requestMethod, newObjDataStoreName)) {
return process.nextTick(cb);
}
log.trace('initiating batch delete', {
keys: locations,
implName,
method: 'batchDelete',
});
const keys = [];
let backendName = '';
const shouldBatchDelete = locations.every(l => {
// legacy sproxyd location, should fallback to using regular delete
if (typeof l === 'string') {
return false;
}
const { dataStoreName, key } = l;
backendName = dataStoreName;
const type = config.getLocationConstraintType(dataStoreName);
// filter out possible `null` created by NFS
if (key && type === 'scality') {
keys.push(key);
return true;
}
return false;
});
if (shouldBatchDelete) {
return client.batchDelete(backendName, { keys }, log, cb);
}
return async.eachLimit(locations, 5, (loc, next) => {
process.nextTick(() => data.delete(loc, log, next));
},
err => {
if (err) {
log.end().error('batch delete failed', { error: err });
// deletion of non-existing objects result in 204
if (err.code === 404) {
return cb();
}
return cb(err);
}
log.end().trace('batch delete successfully completed');
return cb();
});
},
switch: newClient => {
client = newClient;
return client;
},
checkHealth: (log, cb, flightCheckOnStartUp) => {
if (!client.healthcheck) {
const defResp = {};
defResp[implName] = { code: 200, message: 'OK' };
return cb(null, defResp);
}
return client.healthcheck(flightCheckOnStartUp, log, (err, result) => {
let respBody = {};
if (err) {
log.error(`error from ${implName}`, { error: err });
respBody[implName] = {
error: err,
};
// error returned as null so async parallel doesn't return
// before all backends are checked
return cb(null, respBody);
}
if (implName === 'multipleBackends') {
respBody = result;
return cb(null, respBody);
}
respBody[implName] = {
code: result.statusCode,
message: result.statusMessage,
};
return cb(null, respBody);
});
},
getDiskUsage: (log, cb) => {
if (!client.getDiskUsage) {
log.debug('returning empty disk usage as fallback', { implName });
return cb(null, {});
}
return client.getDiskUsage(log.getSerializedUids(), cb);
},
/**
* _putForCopy - put used for copying object
* @param {object} cipherBundle - cipher bundle that encrypt the data
* @param {object} stream - stream containing the data
* @param {object} part - element of dataLocator array
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @param {object} log - Werelogs request logger
* @param {function} cb - callback
* @returns {function} cb - callback
*/
_putForCopy: (cipherBundle, stream, part, dataStoreContext,
destBackendInfo, log, cb) => data.put(cipherBundle, stream,
part.size, dataStoreContext,
destBackendInfo, log,
(error, partRetrievalInfo) => {
if (error) {
return cb(error);
}
const partResult = {
key: partRetrievalInfo.key,
dataStoreName: partRetrievalInfo
.dataStoreName,
dataStoreType: partRetrievalInfo
.dataStoreType,
start: part.start,
size: part.size,
};
if (cipherBundle) {
partResult.cryptoScheme = cipherBundle.cryptoScheme;
partResult.cipheredDataKey = cipherBundle.cipheredDataKey;
}
if (part.dataStoreETag) {
partResult.dataStoreETag = part.dataStoreETag;
}
if (partRetrievalInfo.dataStoreVersionId) {
partResult.dataStoreVersionId =
partRetrievalInfo.dataStoreVersionId;
}
return cb(null, partResult);
}),
/**
* _dataCopyPut - put used for copying object with and without
* encryption
* @param {string} serverSideEncryption - Server side encryption
* @param {object} stream - stream containing the data
* @param {object} part - element of dataLocator array
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @param {object} log - Werelogs request logger
* @param {function} cb - callback
* @returns {function} cb - callback
*/
_dataCopyPut: (serverSideEncryption, stream, part, dataStoreContext,
destBackendInfo, log, cb) => {
if (serverSideEncryption) {
return kms.createCipherBundle(
serverSideEncryption,
log, (err, cipherBundle) => {
if (err) {
log.debug('error getting cipherBundle');
return cb(errors.InternalError);
}
return data._putForCopy(cipherBundle, stream, part,
dataStoreContext, destBackendInfo, log, cb);
});
}
// Copied object is not encrypted so just put it
// without a cipherBundle
return data._putForCopy(null, stream, part, dataStoreContext,
destBackendInfo, log, cb);
},
/**
* copyObject - copy object
* @param {object} request - request object
* @param {string} sourceLocationConstraintName -
* source locationContraint name (awsbackend, azurebackend, ...)
* @param {object} storeMetadataParams - metadata information of the
* source object
* @param {array} dataLocator - source object metadata location(s)
* NOTE: for Azure and AWS data backend this array only has one item
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @param {object} sourceBucketMD - metadata of the source bucket
* @param {object} destBucketMD - metadata of the destination bucket
* @param {object} serverSideEncryption - server side encryption configuration
* @param {object} log - Werelogs request logger
* @param {function} cb - callback
* @returns {function} cb - callback
*/
copyObject: (request,
sourceLocationConstraintName, storeMetadataParams, dataLocator,
dataStoreContext, destBackendInfo, sourceBucketMD, destBucketMD,
serverSideEncryption, log, cb) => {
if (config.backends.data === 'multiple' &&
utils.externalBackendCopy(sourceLocationConstraintName,
storeMetadataParams.dataStoreName, sourceBucketMD, destBucketMD)
&& serverSideEncryption === null) {
const destLocationConstraintName =
storeMetadataParams.dataStoreName;
const objectGetInfo = dataLocator[0];
const externalSourceKey = objectGetInfo.key;
return client.copyObject(request, destLocationConstraintName,
externalSourceKey, sourceLocationConstraintName,
storeMetadataParams, log, (error, objectRetrievalInfo) => {
if (error) {
return cb(error);
}
const putResult = {
key: objectRetrievalInfo.key,
dataStoreName: objectRetrievalInfo.
dataStoreName,
dataStoreType: objectRetrievalInfo.
dataStoreType,
dataStoreVersionId:
objectRetrievalInfo.dataStoreVersionId,
size: storeMetadataParams.size,
dataStoreETag: objectGetInfo.dataStoreETag,
start: objectGetInfo.start,
};
const putResultArr = [putResult];
return cb(null, putResultArr);
});
}
// dataLocator is an array. need to get and put all parts
// For now, copy 1 part at a time. Could increase the second
// argument here to increase the number of parts
// copied at once.
return async.mapLimit(dataLocator, 1,
// eslint-disable-next-line prefer-arrow-callback
function copyPart(part, copyCb) {
if (part.dataStoreType === 'azure') {
const passThrough = new PassThrough();
return async.parallel([
parallelCb => data.get(part, passThrough, log, err =>
parallelCb(err)),
parallelCb => data._dataCopyPut(serverSideEncryption,
passThrough,
part, dataStoreContext, destBackendInfo, log,
parallelCb),
], (err, res) => {
if (err) {
return copyCb(err);
}
return copyCb(null, res[1]);
});
}
return data.get(part, null, log, (err, stream) => {
if (err) {
return copyCb(err);
}
return data._dataCopyPut(serverSideEncryption, stream,
part, dataStoreContext, destBackendInfo, log, copyCb);
});
}, (err, results) => {
if (err) {
log.debug('error transferring data from source',
{ error: err });
return cb(err);
}
return cb(null, results);
});
},
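    // Illustrative note (comments only): on success, copyObject's callback gets
    // an array with one entry per copied part; the values below are hypothetical:
    //   [{ key: '8f4b...', dataStoreName: 'us-east-1', dataStoreType: 'scality',
    //      start: 0, size: 1024, dataStoreETag: '1:ab12...' }]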
_dataCopyPutPart: (request,
serverSideEncryption, stream, part,
dataStoreContext, destBackendInfo, locations, log, cb) => {
const numberPartSize =
Number.parseInt(part.size, 10);
const partNumber = Number.parseInt(request.query.partNumber, 10);
const uploadId = request.query.uploadId;
const destObjectKey = request.objectKey;
const destBucketName = request.bucketName;
const destLocationConstraintName = destBackendInfo
.getControllingLocationConstraint();
if (externalBackends[config
.locationConstraints[destLocationConstraintName]
.type]) {
return multipleBackendGateway.uploadPart(null, null,
stream, numberPartSize,
destLocationConstraintName, destObjectKey, uploadId,
partNumber, destBucketName, log,
(err, partInfo) => {
if (err) {
log.error('error putting ' +
'part to AWS', {
error: err,
method:
'objectPutCopyPart::' +
'multipleBackendGateway.' +
'uploadPart',
});
return cb(errors.ServiceUnavailable);
}
// skip to end of waterfall
// because don't need to store
// part metadata
if (partInfo &&
partInfo.dataStoreType === 'aws_s3') {
// if data backend handles MPU, skip to end
// of waterfall
const partResult = {
dataStoreETag: partInfo.dataStoreETag,
};
locations.push(partResult);
return cb(skipError, partInfo.dataStoreETag);
} else if (
partInfo &&
partInfo.dataStoreType === 'azure') {
const partResult = {
key: partInfo.key,
dataStoreName: partInfo.dataStoreName,
dataStoreETag: partInfo.dataStoreETag,
size: numberPartSize,
numberSubParts:
partInfo.numberSubParts,
partNumber: partInfo.partNumber,
};
locations.push(partResult);
return cb();
}
return cb(skipError);
});
}
if (serverSideEncryption) {
return kms.createCipherBundle(
serverSideEncryption,
log, (err, cipherBundle) => {
if (err) {
log.debug('error getting cipherBundle',
{ error: err });
return cb(errors.InternalError);
}
return data.put(cipherBundle, stream,
numberPartSize, dataStoreContext,
destBackendInfo, log,
(error, partRetrievalInfo,
hashedStream) => {
if (error) {
log.debug('error putting ' +
'encrypted part', { error });
return cb(error);
}
const partResult = {
key: partRetrievalInfo.key,
dataStoreName: partRetrievalInfo
.dataStoreName,
dataStoreETag: hashedStream
.completedHash,
// Do not include part start
// here since will change in
// final MPU object
size: numberPartSize,
sseCryptoScheme: cipherBundle
.cryptoScheme,
sseCipheredDataKey: cipherBundle
.cipheredDataKey,
sseAlgorithm: cipherBundle
.algorithm,
sseMasterKeyId: cipherBundle
.masterKeyId,
};
locations.push(partResult);
return cb();
});
});
}
// Copied object is not encrypted so just put it
// without a cipherBundle
return data.put(null, stream, numberPartSize,
dataStoreContext, destBackendInfo,
log, (error, partRetrievalInfo, hashedStream) => {
if (error) {
log.debug('error putting object part',
{ error });
return cb(error);
}
const partResult = {
key: partRetrievalInfo.key,
dataStoreName: partRetrievalInfo.dataStoreName,
dataStoreETag: hashedStream.completedHash,
size: numberPartSize,
};
locations.push(partResult);
return cb();
});
},
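    // Illustrative note (comments only): when the destination location is an
    // external backend that manages the MPU itself (the aws_s3 branch above),
    // the callback is invoked with the module's skipError sentinel so the caller
    // can leave its async flow early instead of storing part metadata locally;
    // uploadPartCopy below treats err === skipError as success.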
/**
* uploadPartCopy - put copy part
* @param {object} request - request object
* @param {object} log - Werelogs request logger
* @param {object} destBucketMD - destination bucket metadata
* @param {string} sourceLocationConstraintName -
     * source locationConstraint name (awsbackend, azurebackend, ...)
* @param {string} destLocationConstraintName -
* location of the destination MPU object (awsbackend, azurebackend, ...)
* @param {array} dataLocator - source object metadata location(s)
     * NOTE: for Azure and AWS data backend this array only has one item
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* dataStoreContext.uploadId: uploadId
* dataStoreContext.partNumber: request.query.partNumber
* @param {function} callback - callback
* @returns {function} cb - callback
*/
uploadPartCopy: (request, log, destBucketMD, sourceLocationConstraintName,
destLocationConstraintName, dataLocator, dataStoreContext,
callback) => {
const serverSideEncryption = destBucketMD.getServerSideEncryption();
const lastModified = new Date().toJSON();
// skip if 0 byte object
if (dataLocator.length === 0) {
return process.nextTick(() => {
callback(null, constants.emptyFileMd5,
lastModified, serverSideEncryption, []);
});
}
// if destination mpu was initiated in legacy version
if (destLocationConstraintName === undefined) {
const backendInfoObj = locationConstraintCheck(request,
null, destBucketMD, log);
if (backendInfoObj.err) {
return process.nextTick(() => {
callback(backendInfoObj.err);
});
}
// eslint-disable-next-line no-param-reassign
destLocationConstraintName = backendInfoObj.controllingLC;
}
const locationTypeMatchAWS =
config.backends.data === 'multiple' &&
config.getLocationConstraintType(sourceLocationConstraintName) ===
config.getLocationConstraintType(destLocationConstraintName) &&
config.getLocationConstraintType(sourceLocationConstraintName) ===
'aws_s3';
// NOTE: using multipleBackendGateway.uploadPartCopy only if copying
// from AWS to AWS
if (locationTypeMatchAWS && dataLocator.length === 1) {
const awsSourceKey = dataLocator[0].key;
return multipleBackendGateway.uploadPartCopy(request,
destLocationConstraintName, awsSourceKey,
sourceLocationConstraintName, log, (error, eTag) => {
if (error) {
return callback(error);
}
return callback(skipError, eTag,
lastModified, serverSideEncryption);
});
}
const backendInfo = new BackendInfo(destLocationConstraintName);
// totalHash will be sent through the RelayMD5Sum transform streams
// to collect the md5 from multiple streams
let totalHash;
const locations = [];
// dataLocator is an array. need to get and put all parts
// in order so can get the ETag of full object
return async.forEachOfSeries(dataLocator,
// eslint-disable-next-line prefer-arrow-callback
function copyPart(part, index, cb) {
if (part.dataStoreType === 'azure') {
const passThrough = new PassThrough();
return async.parallel([
next => data.get(part, passThrough, log, err => {
if (err) {
log.error('error getting data part ' +
'from Azure', {
error: err,
method:
'objectPutCopyPart::' +
'multipleBackendGateway.' +
'copyPart',
});
return next(err);
}
return next();
}),
next => data._dataCopyPutPart(request,
serverSideEncryption, passThrough, part,
dataStoreContext, backendInfo, locations, log, next),
], err => {
if (err) {
return cb(err);
}
return cb();
});
}
return data.get(part, null, log, (err, stream) => {
if (err) {
log.debug('error getting object part',
{ error: err });
return cb(err);
}
const hashedStream =
new RelayMD5Sum(totalHash, updatedHash => {
totalHash = updatedHash;
});
stream.pipe(hashedStream);
// destLocationConstraintName is location of the
// destination MPU object
return data._dataCopyPutPart(request,
serverSideEncryption, hashedStream, part,
dataStoreContext, backendInfo, locations, log, cb);
});
}, err => {
// Digest the final combination of all of the part streams
if (err && err !== skipError) {
log.debug('error transferring data from source',
{ error: err, method: 'goGetData' });
return callback(err);
}
if (totalHash) {
totalHash = totalHash.digest('hex');
} else {
totalHash = locations[0].dataStoreETag;
}
if (err && err === skipError) {
return callback(skipError, totalHash,
lastModified, serverSideEncryption);
}
return callback(null, totalHash,
lastModified, serverSideEncryption, locations);
});
},
};
module.exports = data;
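
// For orientation, a hedged usage sketch of the copy entry point documented
// above; it is not part of the diff. Every argument below (storeMetadataParams,
// dataStoreContext, destBackendInfo, the 'awsbackend' location) is a
// hypothetical placeholder normally built by the API layer.
const data = require('./lib/data/wrapper');

function copyObjectExample(request, storeMetadataParams, dataLocator,
    dataStoreContext, destBackendInfo, sourceBucketMD, destBucketMD, log, cb) {
    const serverSideEncryption = destBucketMD.getServerSideEncryption();
    return data.copyObject(request,
        'awsbackend', // sourceLocationConstraintName (assumed)
        storeMetadataParams, dataLocator, dataStoreContext, destBackendInfo,
        sourceBucketMD, destBucketMD, serverSideEncryption, log,
        (err, locations) => {
            if (err) {
                return cb(err);
            }
            // locations: one entry per copied part, as assembled in copyObject
            return cb(null, locations);
        });
}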

View File

@ -11,7 +11,6 @@ const Common = require('./common');
 let scalityKMS;
 let scalityKMSImpl;
 try {
-    // eslint-disable-next-line import/no-unresolved
     const ScalityKMS = require('scality-kms');
     scalityKMS = new ScalityKMS(config.kms);
     scalityKMSImpl = 'scalityKms';

View File

@ -13,7 +13,6 @@ const versionSep = arsenal.versioning.VersioningConstants.VersionId.Separator;
 const METASTORE = '__metastore';
 class BucketFileInterface {
     /**
      * @constructor
      * @param {object} [params] - constructor params
@ -84,7 +83,7 @@ class BucketFileInterface {
     createBucket(bucketName, bucketMD, log, cb) {
         this.getBucketAttributes(bucketName, log, err => {
-            if (err && err !== errors.NoSuchBucket) {
+            if (err && !err.is.NoSuchBucket) {
                 return cb(err);
             }
             if (err === undefined) {

View File

@ -146,7 +146,7 @@ const metastore = {
return cb(null, { return cb(null, {
bucket: bucket.serialize(), bucket: bucket.serialize(),
obj: JSON.stringify( obj: JSON.stringify(
metadata.keyMaps.get(bucketName).get(objName) metadata.keyMaps.get(bucketName).get(objName),
), ),
}); });
}); });

View File

@ -11,7 +11,7 @@ const locationConstraintCheck = require(
     '../api/apiUtils/object/locationConstraintCheck');
 const { dataStore } = require('../api/apiUtils/object/storeObject');
 const prepareRequestContexts = require(
     '../api/apiUtils/authorization/prepareRequestContexts');
 const { decodeVersionId } = require('../api/apiUtils/object/versioning');
 const locationKeysHaveChanged
     = require('../api/apiUtils/object/locationKeysHaveChanged');
@ -719,7 +719,7 @@ function batchDelete(request, response, log, callback) {
     log.trace('batch delete locations', { locations });
     return async.eachLimit(locations, 5, (loc, next) => {
         data.delete(loc, log, err => {
-            if (err && err.ObjNotFound) {
+            if (err?.is.ObjNotFound) {
                 log.info('batch delete: data location do not exist', {
                     method: 'batchDelete',
                     location: loc,

View File

@ -16,7 +16,7 @@ function getMetricToPush(prevObjectMD, newObjectMD) {
assert.deepStrictEqual(prevObjectMD.getAcl(), newObjectMD.getAcl()); assert.deepStrictEqual(prevObjectMD.getAcl(), newObjectMD.getAcl());
assert.deepStrictEqual( assert.deepStrictEqual(
prevObjectMD.getTags(), prevObjectMD.getTags(),
newObjectMD.getTags() newObjectMD.getTags(),
); );
} catch (e) { } catch (e) {
return 'replicateTags'; return 'replicateTags';

View File

@ -10,10 +10,18 @@ const { clientCheck } = require('./utilities/healthcheckHandler');
 const _config = require('./Config').config;
 const { blacklistedPrefixes } = require('../constants');
 const api = require('./api/api');
-const data = require('./data/wrapper');
+const dataWrapper = require('./data/wrapper');
+const kms = require('./kms/wrapper');
+const locationStorageCheck =
+    require('./api/apiUtils/object/locationStorageCheck');
+const vault = require('./auth/vault');
+const metadata = require('./metadata/wrapper');
 const routes = arsenal.s3routes.routes;
+const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
 const websiteEndpoints = _config.websiteEndpoints;
+let client = dataWrapper.client;
+const implName = dataWrapper.implName;

 let allEndpoints;
 function updateAllEndpoints() {
@ -21,6 +29,13 @@ function updateAllEndpoints() {
 }
 _config.on('rest-endpoints-update', updateAllEndpoints);
 updateAllEndpoints();
+_config.on('location-constraints-update', () => {
+    if (implName === 'multipleBackends') {
+        const clients = parseLC(_config, vault);
+        client = new MultipleBackendGateway(
+            clients, metadata, locationStorageCheck);
+    }
+});

 // redis client
 let localCacheClient;
@ -78,7 +93,15 @@ class S3Server {
             allEndpoints,
             websiteEndpoints,
             blacklistedPrefixes,
-            dataRetrievalFn: data.get,
+            dataRetrievalParams: {
+                client,
+                implName,
+                config: _config,
+                kms,
+                metadata,
+                locStorageCheckFn: locationStorageCheck,
+                vault,
+            },
         };
         routes(req, res, params, logger, _config);
     }
@ -144,7 +167,7 @@ class S3Server {
     cleanUp() {
         logger.info('server shutting down');
         Promise.all(this.servers.map(server =>
-            new Promise(resolve => server.close(resolve))
+            new Promise(resolve => server.close(resolve)),
         )).then(() => process.exit(0));
     }
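
In short, the server no longer hands routes() a bare dataRetrievalFn; it passes a dataRetrievalParams bag that Arsenal's route handlers use to reach the data client, KMS, metadata and Vault themselves. A rough sketch of the shape, with fields taken from the hunk above and all surrounding wiring assumed:

// Sketch only: the route params now passed to arsenal.s3routes.routes.
const params = {
    // ...other route params unchanged (api, allEndpoints, websiteEndpoints, ...)
    blacklistedPrefixes,
    dataRetrievalParams: {
        client,          // data client; swapped for a new MultipleBackendGateway
                         // when 'location-constraints-update' fires
        implName,
        config: _config,
        kms,
        metadata,
        locStorageCheckFn: locationStorageCheck,
        vault,
    },
};
routes(req, res, params, logger, _config);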

View File

@ -42,7 +42,7 @@ const services = {
             // buckets to list. By returning an empty array, the
             // getService API will just respond with the user info
             // without listing any buckets.
-            if (err && err.NoSuchBucket) {
+            if (err?.is.NoSuchBucket) {
                 log.trace('no buckets found');
                 // If we checked the old user bucket, that means we
                 // already checked the new user bucket. If neither the
@ -555,7 +555,7 @@ const services = {
         // If the MPU was initiated, the mpu bucket should exist.
         const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
         metadata.getBucket(mpuBucketName, log, (err, mpuBucket) => {
-            if (err && err.NoSuchBucket) {
+            if (err?.is.NoSuchBucket) {
                 log.debug('bucket not found in metadata', { error: err,
                     method: 'services.metadataValidateMultipart' });
                 return cb(errors.NoSuchUpload);
@ -577,7+577,7 @@ const services = {
             metadata.getObjectMD(mpuBucket.getName(), mpuOverviewKey,
                 {}, log, (err, storedMetadata) => {
                     if (err) {
-                        if (err.NoSuchKey) {
+                        if (err.is.NoSuchKey) {
                             return cb(errors.NoSuchUpload);
                         }
                         log.error('error from metadata', { error: err });
@ -753,7 +753,7 @@ const services = {
         assert.strictEqual(typeof bucketName, 'string');
         const MPUBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
         metadata.getBucket(MPUBucketName, log, (err, bucket) => {
-            if (err && err.NoSuchBucket) {
+            if (err?.is.NoSuchBucket) {
                 log.trace('no buckets found');
                 const creationDate = new Date().toJSON();
                 const mpuBucket = new BucketInfo(MPUBucketName,
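
These hunks are the core of the error-type migration: Arsenal errors are no longer probed as bare properties or compared by identity, but checked through the .is map on ArsenalError instances (available with the arsenal#7.10.23 bump in this changeset). A hedged before/after sketch, using only patterns visible in the hunks:

// Before: truthy property / identity checks against arsenal errors
if (err && err.NoSuchBucket) { /* treat as missing bucket */ }
if (err && err !== errors.NoSuchBucket) { return cb(err); }

// After: use the .is map, with optional chaining when err may be undefined
if (err?.is.NoSuchBucket) { /* treat as missing bucket */ }
if (err && !err.is.NoSuchBucket) { return cb(err); }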

View File

@ -241,7 +241,7 @@ aclUtils.convertToXml = grantInfo => {
`<DisplayName>${escapeForXml(ownerInfo.displayName)}` + `<DisplayName>${escapeForXml(ownerInfo.displayName)}` +
'</DisplayName>', '</DisplayName>',
'</Owner>', '</Owner>',
'<AccessControlList>' '<AccessControlList>',
); );
grants.forEach(grant => { grants.forEach(grant => {
@ -252,29 +252,29 @@ aclUtils.convertToXml = grantInfo => {
if (grant.ID) { if (grant.ID) {
xml.push('<Grantee xmlns:xsi="http://www.w3.org/2001/' + xml.push('<Grantee xmlns:xsi="http://www.w3.org/2001/' +
'XMLSchema-instance" xsi:type="CanonicalUser">', 'XMLSchema-instance" xsi:type="CanonicalUser">',
`<ID>${grant.ID}</ID>` `<ID>${grant.ID}</ID>`,
); );
} else if (grant.URI) { } else if (grant.URI) {
xml.push('<Grantee xmlns:xsi="http://www.w3.org/2001/' + xml.push('<Grantee xmlns:xsi="http://www.w3.org/2001/' +
'XMLSchema-instance" xsi:type="Group">', 'XMLSchema-instance" xsi:type="Group">',
`<URI>${escapeForXml(grant.URI)}</URI>` `<URI>${escapeForXml(grant.URI)}</URI>`,
); );
} }
if (grant.displayName) { if (grant.displayName) {
xml.push(`<DisplayName>${escapeForXml(grant.displayName)}` + xml.push(`<DisplayName>${escapeForXml(grant.displayName)}` +
'</DisplayName>' '</DisplayName>',
); );
} }
xml.push('</Grantee>', xml.push('</Grantee>',
`<Permission>${grant.permission}</Permission>`, `<Permission>${grant.permission}</Permission>`,
'</Grant>' '</Grant>',
); );
}); });
xml.push('</AccessControlList>', xml.push('</AccessControlList>',
'</AccessControlPolicy>' '</AccessControlPolicy>',
); );
return xml.join(''); return xml.join('');
@ -351,7 +351,7 @@ aclUtils.getCanonicalIDs = function getCanonicalIDs(acl) {
acl.WRITE, acl.WRITE,
acl.WRITE_ACP, acl.WRITE_ACP,
acl.READ, acl.READ,
acl.READ_ACP acl.READ_ACP,
); );
const uniqueGrantees = Array.from(new Set(aclGrantees)); const uniqueGrantees = Array.from(new Set(aclGrantees));
// grantees can be a mix of canonicalIDs and predefined groups in the form // grantees can be a mix of canonicalIDs and predefined groups in the form

View File

@ -70,7 +70,7 @@ function clientCheck(flightCheckOnStartUp, log, cb) {
// if there is an error from an external backend, // if there is an error from an external backend,
// only return a 500 if it is on startup // only return a 500 if it is on startup
// (flightCheckOnStartUp set to true) // (flightCheckOnStartUp set to true)
obj[k].error && (flightCheckOnStartUp || !obj[k].external) obj[k].error && (flightCheckOnStartUp || !obj[k].external),
); );
if (fail) { if (fail) {
return cb(errors.InternalError, obj); return cb(errors.InternalError, obj);

View File

@ -20,7 +20,7 @@
   "homepage": "https://github.com/scality/S3#readme",
   "dependencies": {
     "@hapi/joi": "^17.1.0",
-    "arsenal": "git+https://github.com/scality/arsenal#7.10.15",
+    "arsenal": "git+https://github.com/scality/arsenal#7.10.23",
     "async": "~2.5.0",
     "aws-sdk": "2.905.0",
     "azure-storage": "^2.1.0",
@ -44,7 +44,7 @@
   },
   "devDependencies": {
     "bluebird": "^3.3.1",
-    "eslint": "^2.4.0",
+    "eslint": "^8.14.0",
     "eslint-config-airbnb": "^6.0.0",
     "eslint-config-scality": "scality/Guidelines#7.10.2",
     "ioredis": "4.9.5",

View File

@ -1,2 +1,2 @@
 'use strict'; // eslint-disable-line strict
-require('./test.js'); // eslint-disable-line import/no-unresolved
+require('./test.js');

View File

@ -30,7 +30,7 @@ class BucketUtility {
createMany(bucketNames) { createMany(bucketNames) {
const promises = bucketNames.map( const promises = bucketNames.map(
bucketName => this.createOne(bucketName) bucketName => this.createOne(bucketName),
); );
return Promise.all(promises); return Promise.all(promises);
@ -57,7 +57,7 @@ class BucketUtility {
deleteMany(bucketNames) { deleteMany(bucketNames) {
const promises = bucketNames.map( const promises = bucketNames.map(
bucketName => this.deleteOne(bucketName) bucketName => this.deleteOne(bucketName),
); );
return Promise.all(promises); return Promise.all(promises);
@ -87,7 +87,7 @@ class BucketUtility {
Key: object.Key, Key: object.Key,
VersionId: object.VersionId, VersionId: object.VersionId,
}).promise() }).promise()
.then(() => object) .then(() => object),
) )
.concat(data.Versions .concat(data.Versions
.filter(object => object.Key.endsWith('/')) .filter(object => object.Key.endsWith('/'))
@ -98,8 +98,8 @@ class BucketUtility {
Key: object.Key, Key: object.Key,
VersionId: object.VersionId, VersionId: object.VersionId,
}).promise() }).promise()
.then(() => object) .then(() => object),
) ),
) )
.concat(data.DeleteMarkers .concat(data.DeleteMarkers
.map(object => .map(object =>
@ -108,14 +108,14 @@ class BucketUtility {
Key: object.Key, Key: object.Key,
VersionId: object.VersionId, VersionId: object.VersionId,
}).promise() }).promise()
.then(() => object))) .then(() => object))),
) ),
); );
} }
emptyMany(bucketNames) { emptyMany(bucketNames) {
const promises = bucketNames.map( const promises = bucketNames.map(
bucketName => this.empty(bucketName) bucketName => this.empty(bucketName),
); );
return Promise.all(promises); return Promise.all(promises);

View File

@ -179,7 +179,7 @@ withV4(sigCfg => {
assert.notStrictEqual(err, null); assert.notStrictEqual(err, null);
assert.strictEqual( assert.strictEqual(
err.statusCode, err.statusCode,
errors.AccessDenied.code errors.AccessDenied.code,
); );
} }
done(); done();

View File

@ -81,7 +81,7 @@ describe('aws-node-sdk test deleteBucketReplication', () => {
             }),
             next => deleteReplicationAndCheckResponse(bucket, next),
             next => s3.getBucketReplication({ Bucket: bucket }, err => {
-                assert(errors.ReplicationConfigurationNotFoundError[err.code]);
+                assert(errors.ReplicationConfigurationNotFoundError.is[err.code]);
                 return next();
             }),
         ], done));

View File

@ -45,7 +45,7 @@ describe('aws-node-sdk test getBucketReplication', () => {
     it("should return 'ReplicationConfigurationNotFoundError' if bucket does " +
     'not have a replication configuration', done =>
         s3.getBucketReplication({ Bucket: bucket }, err => {
-            assert(errors.ReplicationConfigurationNotFoundError[err.code]);
+            assert(errors.ReplicationConfigurationNotFoundError.is[err.code]);
             return done();
         }));

View File

@ -137,21 +137,21 @@ describe('PUT Bucket - AWS.S3.createBucket', () => {
done => { done => {
const longName = 'x'.repeat(64); const longName = 'x'.repeat(64);
testFn(longName, done); testFn(longName, done);
} },
); );
itSkipIfAWS('should return 400 if name is formatted as IP address', itSkipIfAWS('should return 400 if name is formatted as IP address',
done => { done => {
const ipAddress = '192.168.5.4'; const ipAddress = '192.168.5.4';
testFn(ipAddress, done); testFn(ipAddress, done);
} },
); );
itSkipIfAWS('should return 400 if name starts with period', itSkipIfAWS('should return 400 if name starts with period',
done => { done => {
const invalidName = '.myawsbucket'; const invalidName = '.myawsbucket';
testFn(invalidName, done); testFn(invalidName, done);
} },
); );
it('should return 400 if name ends with period', done => { it('should return 400 if name ends with period', done => {
@ -164,7 +164,7 @@ describe('PUT Bucket - AWS.S3.createBucket', () => {
done => { done => {
const invalidName = 'my..examplebucket'; const invalidName = 'my..examplebucket';
testFn(invalidName, done); testFn(invalidName, done);
} },
); );
it('should return 400 if name has special chars', done => { it('should return 400 if name has special chars', done => {

View File

@ -36,6 +36,6 @@ describe('aws-node-sdk stress test bucket', function testSuite() {
next => putObjects(s3, loopId, err => next(err)), next => putObjects(s3, loopId, err => next(err)),
next => deleteObjects(s3, loopId, err => next(err)), next => deleteObjects(s3, loopId, err => next(err)),
next => s3.deleteBucket({ Bucket: bucket }, err => next(err)), next => s3.deleteBucket({ Bucket: bucket }, err => next(err)),
], err => next(err)), done) ], err => next(err)), done),
); );
}); });

View File

@ -139,7 +139,7 @@ function getObjectsAndAssertAcls(s3, key, versionIds, expectedData,
} }
describeSkipIfNotMultiple('AWS backend put/get object acl with versioning', describeSkipIfNotMultiple('AWS backend put/get object acl with versioning',
function testSuite() { function testSuite() {
this.timeout(30000); this.timeout(30000);
withV4(sigCfg => { withV4(sigCfg => {
let bucketUtil; let bucketUtil;
@ -251,4 +251,4 @@ function testSuite() {
], done); ], done);
}); });
}); });
}); });

View File

@ -26,7 +26,7 @@ const nonExistingId = process.env.AWS_ON_AIR ?
'3939393939393939393936493939393939393939756e6437'; '3939393939393939393936493939393939393939756e6437';
describeSkipIfNotMultiple('Multiple backend delete object from Azure', describeSkipIfNotMultiple('Multiple backend delete object from Azure',
function testSuite() { function testSuite() {
this.timeout(250000); this.timeout(250000);
withV4(sigCfg => { withV4(sigCfg => {
let bucketUtil; let bucketUtil;
@ -248,4 +248,4 @@ function testSuite() {
}); });
}); });
}); });
}); });

View File

@ -36,7 +36,7 @@ function getAndAssertVersions(s3, bucket, key, versionIds, expectedData,
} }
describeSkipIfNotMultiple('AWS backend get object with versioning', describeSkipIfNotMultiple('AWS backend get object with versioning',
function testSuite() { function testSuite() {
this.timeout(30000); this.timeout(30000);
withV4(sigCfg => { withV4(sigCfg => {
let bucketUtil; let bucketUtil;
@ -351,4 +351,4 @@ function testSuite() {
], done); ], done);
}); });
}); });
}); });

View File

@ -22,7 +22,7 @@ const normalBody = Buffer.from('I am a body', 'utf8');
const azureTimeout = 10000; const azureTimeout = 10000;
describeSkipIfNotMultiple('Multiple backend get object from Azure', describeSkipIfNotMultiple('Multiple backend get object from Azure',
function testSuite() { function testSuite() {
this.timeout(30000); this.timeout(30000);
withV4(sigCfg => { withV4(sigCfg => {
let bucketUtil; let bucketUtil;
@ -160,4 +160,4 @@ function testSuite() {
}); });
}); });
}); });
}); });

View File

@ -101,7 +101,7 @@ function mpuSetup(key, location, cb) {
} }
describeSkipIfNotMultiple('Complete MPU API for Azure data backend', describeSkipIfNotMultiple('Complete MPU API for Azure data backend',
function testSuite() { function testSuite() {
this.timeout(150000); this.timeout(150000);
withV4(sigCfg => { withV4(sigCfg => {
beforeEach(function beFn() { beforeEach(function beFn() {
@ -250,4 +250,4 @@ function testSuite() {
}); });
}); });
}); });
}); });

View File

@ -93,7 +93,7 @@ function completeAndAssertMpu(s3, params, cb) {
} }
describeSkipIfNotMultiple('AWS backend complete mpu with versioning', describeSkipIfNotMultiple('AWS backend complete mpu with versioning',
function testSuite() { function testSuite() {
this.timeout(120000); this.timeout(120000);
withV4(sigCfg => { withV4(sigCfg => {
const bucketUtil = new BucketUtility('default', sigCfg); const bucketUtil = new BucketUtility('default', sigCfg);
@ -173,4 +173,4 @@ function testSuite() {
], done); ], done);
}); });
}); });
}); });

View File

@ -63,7 +63,7 @@ function putSourceObj(key, location, objSize, bucket, cb) {
} }
function assertGetObjects(sourceKey, sourceBucket, sourceLoc, destKey, function assertGetObjects(sourceKey, sourceBucket, sourceLoc, destKey,
destBucket, destLoc, azureKey, mdDirective, objSize, callback) { destBucket, destLoc, azureKey, mdDirective, objSize, callback) {
const sourceGetParams = { Bucket: sourceBucket, Key: sourceKey }; const sourceGetParams = { Bucket: sourceBucket, Key: sourceKey };
const destGetParams = { Bucket: destBucket, Key: destKey }; const destGetParams = { Bucket: destBucket, Key: destKey };
async.series([ async.series([
@ -112,7 +112,7 @@ destBucket, destLoc, azureKey, mdDirective, objSize, callback) {
} }
describeSkipIfNotMultiple('MultipleBackend object copy: Azure', describeSkipIfNotMultiple('MultipleBackend object copy: Azure',
function testSuite() { function testSuite() {
this.timeout(250000); this.timeout(250000);
withV4(sigCfg => { withV4(sigCfg => {
beforeEach(function beFn() { beforeEach(function beFn() {
@ -612,4 +612,4 @@ function testSuite() {
}); });
}); });
}); });
}); });

View File

@ -48,8 +48,8 @@ function putSourceObj(location, isEmptyObj, bucket, cb) {
} }
function assertGetObjects(sourceKey, sourceBucket, sourceLoc, destKey, function assertGetObjects(sourceKey, sourceBucket, sourceLoc, destKey,
destBucket, destLoc, awsKey, mdDirective, isEmptyObj, awsS3, awsLocation, destBucket, destLoc, awsKey, mdDirective, isEmptyObj, awsS3, awsLocation,
callback) { callback) {
const awsBucket = const awsBucket =
config.locationConstraints[awsLocation].details.bucketName; config.locationConstraints[awsLocation].details.bucketName;
const sourceGetParams = { Bucket: sourceBucket, Key: sourceKey }; const sourceGetParams = { Bucket: sourceBucket, Key: sourceKey };
@ -106,7 +106,7 @@ callback) {
} }
describeSkipIfNotMultiple('MultipleBackend object copy: AWS', describeSkipIfNotMultiple('MultipleBackend object copy: AWS',
function testSuite() { function testSuite() {
this.timeout(250000); this.timeout(250000);
withV4(sigCfg => { withV4(sigCfg => {
beforeEach(() => { beforeEach(() => {
@ -634,4 +634,4 @@ function testSuite() {
}); });
}); });
}); });
}); });

View File

@ -170,7 +170,7 @@ function assertGetObjects(testParams, cb) {
} }
describeSkipIfNotMultiple('AWS backend object copy with versioning', describeSkipIfNotMultiple('AWS backend object copy with versioning',
function testSuite() { function testSuite() {
this.timeout(250000); this.timeout(250000);
withV4(sigCfg => { withV4(sigCfg => {
bucketUtil = new BucketUtility('default', sigCfg); bucketUtil = new BucketUtility('default', sigCfg);
@ -196,7 +196,7 @@ function testSuite() {
`in afterEach: ${err}\n`); `in afterEach: ${err}\n`);
throw err; throw err;
} }
}) }),
); );
[{ [{
@ -416,4 +416,4 @@ function testSuite() {
}); });
}); });
}); });
}); });

View File

@ -583,7 +583,7 @@ describeSkipIfNotMultiple('Put Copy Part to AZURE', function describeF() {
}); });
describeSkipIfNotMultiple('Put Copy Part to AZURE with large object', describeSkipIfNotMultiple('Put Copy Part to AZURE with large object',
function describeF() { function describeF() {
this.timeout(800000); this.timeout(800000);
withV4(sigCfg => { withV4(sigCfg => {
beforeEach(() => { beforeEach(() => {
@ -675,10 +675,10 @@ function describeF() {
}); });
}); });
}); });
}); });
describeSkipIfNotMultiple('Put Copy Part to AZURE with complete MPU', describeSkipIfNotMultiple('Put Copy Part to AZURE with complete MPU',
function describeF() { function describeF() {
this.timeout(800000); this.timeout(800000);
withV4(sigCfg => { withV4(sigCfg => {
beforeEach(() => { beforeEach(() => {
@ -805,4 +805,4 @@ function describeF() {
}); });
}); });
}); });
}); });

View File

@ -158,7 +158,7 @@ function mpuWaterfall(params, cb) {
} }
describeSkipIfNotMultiple('Object tagging with multiple backends', describeSkipIfNotMultiple('Object tagging with multiple backends',
function testSuite() { function testSuite() {
if (!process.env.S3_END_TO_END) { if (!process.env.S3_END_TO_END) {
this.retries(2); this.retries(2);
} }
@ -381,4 +381,4 @@ function testSuite() {
}); });
}); });
}); });
}); });

View File

@ -21,7 +21,7 @@ const { putTaggingAndAssert, delTaggingAndAssert, awsGetAssertTags } = tagging;
const someBody = 'teststring'; const someBody = 'teststring';
describeSkipIfNotMultiple('AWS backend object delete tagging with versioning ', describeSkipIfNotMultiple('AWS backend object delete tagging with versioning ',
function testSuite() { function testSuite() {
this.timeout(120000); this.timeout(120000);
const tags = { key1: 'value1', key2: 'value2' }; const tags = { key1: 'value1', key2: 'value2' };
@ -221,4 +221,4 @@ function testSuite() {
], done); ], done);
}); });
}); });
}); });

View File

@ -22,7 +22,7 @@ const { putTaggingAndAssert, getTaggingAndAssert, delTaggingAndAssert,
const someBody = 'teststring'; const someBody = 'teststring';
describeSkipIfNotMultiple('AWS backend object put/get tagging with versioning', describeSkipIfNotMultiple('AWS backend object put/get tagging with versioning',
function testSuite() { function testSuite() {
this.timeout(120000); this.timeout(120000);
const tags = { key1: 'value1', key2: 'value2' }; const tags = { key1: 'value1', key2: 'value2' };
@ -283,4 +283,4 @@ function testSuite() {
], done); ], done);
}); });
}); });
}); });

View File

@ -355,7 +355,7 @@ describe('MultipleBackend put object', function testSuite() {
}); });
describeSkipIfNotMultiple('MultipleBackend put object based on bucket location', describeSkipIfNotMultiple('MultipleBackend put object based on bucket location',
() => { () => {
withV4(sigCfg => { withV4(sigCfg => {
beforeEach(() => { beforeEach(() => {
bucketUtil = new BucketUtility('default', sigCfg); bucketUtil = new BucketUtility('default', sigCfg);
@ -444,7 +444,7 @@ describeSkipIfNotMultiple('MultipleBackend put object based on bucket location',
}); });
}); });
}); });
}); });
describe('MultipleBackend put based on request endpoint', () => { describe('MultipleBackend put based on request endpoint', () => {
withV4(sigCfg => { withV4(sigCfg => {

View File

@ -45,7 +45,7 @@ describe('Abort MPU', () => {
UploadId: uploadId, UploadId: uploadId,
}).promise() }).promise()
.then(() => bucketUtil.empty(bucket)) .then(() => bucketUtil.empty(bucket))
.then(() => bucketUtil.deleteOne(bucket)) .then(() => bucketUtil.deleteOne(bucket)),
); );
// aws-sdk now (v2.363.0) returns 'UriParameterError' error // aws-sdk now (v2.363.0) returns 'UriParameterError' error

View File

@ -11,7 +11,7 @@ const log = new DummyRequestLogger();
const bucket = 'bucket2testconditionkeys'; const bucket = 'bucket2testconditionkeys';
const object = 'object2testconditionkeys'; const object = 'object2testconditionkeys';
const objPutTaggingReq = taggingUtil const objPutTaggingReq = taggingUtil
.createObjectTaggingRequest('PUT', bucket, object); .createObjectTaggingRequest('PUT', bucket, object);
const requestContexts = [createRequestContext('objectPutTagging', objPutTaggingReq)]; const requestContexts = [createRequestContext('objectPutTagging', objPutTaggingReq)];
describe('Tag condition keys updateRequestContext', () => { describe('Tag condition keys updateRequestContext', () => {
@ -46,7 +46,8 @@ describe('Tag condition keys updateRequestContext', () => {
assert.ifError(err); assert.ifError(err);
assert(newRequestContexts[0].getNeedTagEval()); assert(newRequestContexts[0].getNeedTagEval());
assert.strictEqual(newRequestContexts[0].getExistingObjTag(), tagsToExist); assert.strictEqual(newRequestContexts[0].getExistingObjTag(), tagsToExist);
assert.strictEqual(newRequestContexts[0].getRequestObjTags(), makeTagQuery(taggingUtil.getTags())); assert.strictEqual(
newRequestContexts[0].getRequestObjTags(), makeTagQuery(taggingUtil.getTags()));
done(); done();
}); });
}); });

View File

@ -84,7 +84,7 @@ describe('large mpu', function tester() {
process.stdout.write('putting parts'); process.stdout.write('putting parts');
return timesLimit(partCount, 20, (n, cb) => return timesLimit(partCount, 20, (n, cb) =>
uploadPart(n, uploadId, s3, cb), err => uploadPart(n, uploadId, s3, cb), err =>
next(err) next(err),
); );
}, },
next => { next => {

View File

@ -99,7 +99,7 @@ describe('Complete MPU', () => {
.then(result => { .then(result => {
uploadId = result.uploadId; uploadId = result.uploadId;
eTag = result.eTag; eTag = result.eTag;
}) }),
); );
it('should complete an MPU with fewer parts than were ' + it('should complete an MPU with fewer parts than were ' +
@ -118,7 +118,7 @@ describe('Complete MPU', () => {
.then(result => { .then(result => {
uploadId = result.uploadId; uploadId = result.uploadId;
eTag = result.eTag; eTag = result.eTag;
}) }),
); );
it('should complete an MPU with fewer parts than were ' + it('should complete an MPU with fewer parts than were ' +
@ -137,7 +137,7 @@ describe('Complete MPU', () => {
.then(result => { .then(result => {
uploadId = result.uploadId; uploadId = result.uploadId;
eTag = result.eTag; eTag = result.eTag;
}) }),
); );
it('should complete an MPU with fewer parts than were ' + it('should complete an MPU with fewer parts than were ' +
@ -219,7 +219,7 @@ describe('Complete MPU', () => {
.then(result => { .then(result => {
uploadId = result.uploadId; uploadId = result.uploadId;
eTag = result.eTag; eTag = result.eTag;
}) }),
); );
it('should complete the MPU successfully and leave a readable object', done => { it('should complete the MPU successfully and leave a readable object', done => {

View File

@ -49,7 +49,7 @@ describe('Object Part Copy', () => {
process.stdout.write(`Error creating source bucket: ${err}\n`); process.stdout.write(`Error creating source bucket: ${err}\n`);
throw err; throw err;
}).then(() => }).then(() =>
s3.createBucketPromise({ Bucket: destBucketName }) s3.createBucketPromise({ Bucket: destBucketName }),
).catch(err => { ).catch(err => {
process.stdout.write(`Error creating dest bucket: ${err}\n`); process.stdout.write(`Error creating dest bucket: ${err}\n`);
throw err; throw err;
@ -92,7 +92,7 @@ describe('Object Part Copy', () => {
} }
}) })
.then(() => bucketUtil.deleteMany([sourceBucketName, .then(() => bucketUtil.deleteMany([sourceBucketName,
destBucketName])) destBucketName])),
); );
@ -751,7 +751,7 @@ describe('Object Part Copy', () => {
throw err; throw err;
} }
}).then(() => otherAccountBucketUtility }).then(() => otherAccountBucketUtility
.deleteOne(otherAccountBucket)) .deleteOne(otherAccountBucket)),
); );
it('should not allow an account without read persmission on the ' + it('should not allow an account without read persmission on the ' +

View File

@ -436,7 +436,7 @@ describe('Cross Origin Resource Sharing requests', () => {
assert.strictEqual(err, null, assert.strictEqual(err, null,
`Unexpected err ${err} in beforeEach`); `Unexpected err ${err} in beforeEach`);
done(err); done(err);
}) }),
); );
afterEach(done => afterEach(done =>
@ -450,7 +450,7 @@ describe('Cross Origin Resource Sharing requests', () => {
} }
return _waitForAWS(done); return _waitForAWS(done);
}); });
}) }),
); );
it('should respond with CORS headers at website endpoint (GET)', it('should respond with CORS headers at website endpoint (GET)',

View File

@ -61,7 +61,7 @@ describe('DELETE multipart', () => {
.catch(err => { .catch(err => {
process.stdout.write(`Error in beforeEach: ${err}\n`); process.stdout.write(`Error in beforeEach: ${err}\n`);
throw err; throw err;
}) }),
); );
afterEach(() => { afterEach(() => {
@ -101,7 +101,7 @@ describe('DELETE multipart', () => {
PartNumber: 1, PartNumber: 1,
UploadId: uploadId, UploadId: uploadId,
}); });
}) }),
); );
it('should return 204 for abortMultipartUpload', done => { it('should return 204 for abortMultipartUpload', done => {

View File

@ -39,7 +39,7 @@ describe('DELETE object', () => {
PartNumber: i, PartNumber: i,
Body: testfile, Body: testfile,
UploadId: uploadId, UploadId: uploadId,
}).promise() }).promise(),
); );
} }
return Promise.all(uploads); return Promise.all(uploads);

View File

@ -71,8 +71,8 @@ function hydrateSSEConfig({ algo: SSEAlgorithm, masterKeyId: KMSMasterKeyID }) {
}, },
}, },
], ],
} },
) ),
); );
} }
@ -92,7 +92,7 @@ describe('per object encryption headers', () => {
assert.ifError(err); assert.ifError(err);
kmsKeyId = keyId; kmsKeyId = keyId;
done(); done();
} },
); );
}); });
@ -137,7 +137,7 @@ describe('per object encryption headers', () => {
} }
assert.deepStrictEqual(sseConfig, expected); assert.deepStrictEqual(sseConfig, expected);
done(); done();
} },
); );
})); }));
@ -161,9 +161,9 @@ describe('per object encryption headers', () => {
} }
res.forEach(sseConfig => assert.deepStrictEqual(sseConfig, expected)); res.forEach(sseConfig => assert.deepStrictEqual(sseConfig, expected));
done(); done();
} },
); );
} },
)); ));
testCases testCases
@ -202,7 +202,7 @@ describe('per object encryption headers', () => {
} }
assert.deepStrictEqual(sseConfig, expected); assert.deepStrictEqual(sseConfig, expected);
done(); done();
} },
); );
}); });
}); });
@ -258,7 +258,7 @@ describe('per object encryption headers', () => {
} }
assert.deepStrictEqual(sseConfig, expected); assert.deepStrictEqual(sseConfig, expected);
done(); done();
} },
); );
}); });
}); });

View File

@ -74,7 +74,7 @@ describe('GET object', () => {
const md5HashExpected = crypto.createHash('md5'); const md5HashExpected = crypto.createHash('md5');
assert.strictEqual( assert.strictEqual(
md5Hash.update(data.Body).digest('hex'), md5Hash.update(data.Body).digest('hex'),
md5HashExpected.update(body).digest('hex') md5HashExpected.update(body).digest('hex'),
); );
return cb(); return cb();
}); });
@ -770,7 +770,7 @@ describe('GET object', () => {
const expected = Buffer.alloc(partSize).fill(num); const expected = Buffer.alloc(partSize).fill(num);
assert.strictEqual( assert.strictEqual(
md5Hash.update(data.Body).digest('hex'), md5Hash.update(data.Body).digest('hex'),
md5HashExpected.update(expected).digest('hex') md5HashExpected.update(expected).digest('hex'),
); );
return done(); return done();
}); });
@ -790,7 +790,7 @@ describe('GET object', () => {
.fill(unOrderedPartNumbers[num - 1]); .fill(unOrderedPartNumbers[num - 1]);
assert.strictEqual( assert.strictEqual(
md5Hash.update(data.Body).digest('hex'), md5Hash.update(data.Body).digest('hex'),
md5HashExpected.update(expected).digest('hex') md5HashExpected.update(expected).digest('hex'),
); );
return done(); return done();
}); });
@ -829,7 +829,7 @@ describe('GET object', () => {
const expected = Buffer.alloc(10); const expected = Buffer.alloc(10);
assert.strictEqual( assert.strictEqual(
md5Hash.update(data.Body).digest('hex'), md5Hash.update(data.Body).digest('hex'),
md5HashExpected.update(expected).digest('hex') md5HashExpected.update(expected).digest('hex'),
); );
done(); done();
}); });
@ -849,7 +849,7 @@ describe('GET object', () => {
const expected = Buffer.alloc(10); const expected = Buffer.alloc(10);
assert.strictEqual( assert.strictEqual(
md5Hash.update(data.Body).digest('hex'), md5Hash.update(data.Body).digest('hex'),
md5HashExpected.update(expected).digest('hex') md5HashExpected.update(expected).digest('hex'),
); );
done(); done();
}); });

View File

@ -12,6 +12,10 @@
 const bucket = 'mock-bucket-lock';
 const unlockedBucket = 'mock-bucket-no-lock';
 const key = 'mock-object-legalhold';
 const keyNoHold = 'mock-object-no-legalhold';
+const nonExistingId = process.env.AWS_ON_AIR ?
+    'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' :
+    '3939393939393939393936493939393939393939756e6437';

 describe('GET object legal hold', () => {
     withV4(sigCfg => {
@ -86,7 +90,7 @@ describe('GET object legal hold', () => {
                 s3.getObjectLegalHold({
                     Bucket: bucket,
                     Key: key,
-                    VersionId: '000000000000',
+                    VersionId: nonExistingId,
                 }, err => {
                     checkError(err, 'NoSuchVersion', 404);
                     done();
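
These test hunks replace the '000000000000' placeholder with a version id that is syntactically valid but known not to exist, presumably so the request reaches the NoSuchVersion path rather than failing earlier on version-id decoding. A short sketch of the resulting assertion, reusing the checkError helper already present in this file:

// Sketch: ask for a well-formed version id that cannot exist
s3.getObjectLegalHold({ Bucket: bucket, Key: key, VersionId: nonExistingId },
    err => {
        checkError(err, 'NoSuchVersion', 404);
        done();
    });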

View File

@ -117,7 +117,7 @@ describe('Part size tests with object head', () => {
     partNumbers.forEach(part => {
         it(`should return the size of part ${part + 1} ` +
         `when --part-number is set to ${part + 1}`, done => {
-            const partNumber = Number.parseInt(part, 0) + 1;
+            const partNumber = Number.parseInt(part, 10) + 1;
             const partSize = bodySize + partNumber;
             s3.headObject({ Bucket: bucket, Key: object, PartNumber: partNumber }, (err, data) => {
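
A small correctness fix rides along here: Number.parseInt(part, 0) leaves the radix to be inferred from the string, whereas an explicit radix of 10 always parses decimal (and satisfies ESLint's radix rule). A tiny illustration:

// radix 0 (or omitted) lets the engine infer the base from the string prefix
Number.parseInt('7', 0);     // 7
Number.parseInt('0x10', 0);  // 16 -- hex prefix is honoured
// explicit radix 10 always parses as decimal
Number.parseInt('0x10', 10); // 0  -- parsing stops at the 'x'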

View File

@ -60,13 +60,13 @@ describe('aws-node-sdk range test of large end position', () => {
it('should get the final 90 bytes of a 2890 byte object for a byte ' + it('should get the final 90 bytes of a 2890 byte object for a byte ' +
'range of 2800-', 'range of 2800-',
done => endRangeTest('bytes=2800-', 'bytes 2800-2889/2890', done) done => endRangeTest('bytes=2800-', 'bytes 2800-2889/2890', done),
); );
it('should get the final 90 bytes of a 2890 byte object for a byte ' + it('should get the final 90 bytes of a 2890 byte object for a byte ' +
'range of 2800-Number.MAX_SAFE_INTEGER', 'range of 2800-Number.MAX_SAFE_INTEGER',
done => endRangeTest(`bytes=2800-${Number.MAX_SAFE_INTEGER}`, done => endRangeTest(`bytes=2800-${Number.MAX_SAFE_INTEGER}`,
'bytes 2800-2889/2890', done) 'bytes 2800-2889/2890', done),
); );
}); });
}); });

View File

@ -13,6 +13,9 @@ const bucketName = 'lockenabledbucket';
const unlockedBucket = 'locknotenabledbucket'; const unlockedBucket = 'locknotenabledbucket';
const objectName = 'putobjectretentionobject'; const objectName = 'putobjectretentionobject';
const noRetentionObject = 'objectwithnoretention'; const noRetentionObject = 'objectwithnoretention';
const nonExistingId = process.env.AWS_ON_AIR ?
'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' :
'3939393939393939393936493939393939393939756e6437';
const retainDate = moment().add(1, 'days').toISOString(); const retainDate = moment().add(1, 'days').toISOString();
@ -106,7 +109,7 @@ describe('GET object retention', () => {
s3.getObjectRetention({ s3.getObjectRetention({
Bucket: bucketName, Bucket: bucketName,
Key: objectName, Key: objectName,
VersionId: '000000000000', VersionId: nonExistingId,
}, err => { }, err => {
checkError(err, 'NoSuchVersion', 404); checkError(err, 'NoSuchVersion', 404);
done(); done();

View File

@ -130,7 +130,7 @@ describe('List parts - object keys with special characters: `&`', () => {
.then(res => { .then(res => {
uploadId = res; uploadId = res;
return Promise.resolve(); return Promise.resolve();
}) }),
); );
afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); afterEach(() => deletePart(s3, bucketUtil, key, uploadId));
@ -152,7 +152,7 @@ describe('List parts - object keys with special characters: `"`', () => {
.then(res => { .then(res => {
uploadId = res; uploadId = res;
return Promise.resolve(); return Promise.resolve();
}) }),
); );
afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); afterEach(() => deletePart(s3, bucketUtil, key, uploadId));
@ -174,7 +174,7 @@ describe('List parts - object keys with special characters: `\'`', () => {
.then(res => { .then(res => {
uploadId = res; uploadId = res;
return Promise.resolve(); return Promise.resolve();
}) }),
); );
afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); afterEach(() => deletePart(s3, bucketUtil, key, uploadId));
@ -196,7 +196,7 @@ describe('List parts - object keys with special characters: `<`', () => {
.then(res => { .then(res => {
uploadId = res; uploadId = res;
return Promise.resolve(); return Promise.resolve();
}) }),
); );
afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); afterEach(() => deletePart(s3, bucketUtil, key, uploadId));
@ -218,7 +218,7 @@ describe('List parts - object keys with special characters: `>`', () => {
.then(res => { .then(res => {
uploadId = res; uploadId = res;
return Promise.resolve(); return Promise.resolve();
}) }),
); );
afterEach(() => deletePart(s3, bucketUtil, key, uploadId)); afterEach(() => deletePart(s3, bucketUtil, key, uploadId));

View File

@ -108,12 +108,12 @@ describe('aws-node-sdk test suite of listMultipartUploads', () =>
UploadId: data.uploadId, UploadId: data.uploadId,
}).promise() }).promise()
.then(() => bucketUtil.empty(bucket)) .then(() => bucketUtil.empty(bucket))
.then(() => bucketUtil.deleteOne(bucket)) .then(() => bucketUtil.deleteOne(bucket)),
); );
it('should list ongoing multipart uploads', () => it('should list ongoing multipart uploads', () =>
s3.listMultipartUploads({ Bucket: bucket }).promise() s3.listMultipartUploads({ Bucket: bucket }).promise()
.then(res => checkValues(res, data)) .then(res => checkValues(res, data)),
); );
it('should list ongoing multipart uploads with params', () => { it('should list ongoing multipart uploads with params', () => {
@ -139,5 +139,5 @@ describe('aws-node-sdk test suite of listMultipartUploads', () =>
}).promise() }).promise()
.then(res => checkValues(res, data)); .then(res => checkValues(res, data));
}); });
}) }),
); );

View File

@ -80,7 +80,7 @@ describe('Multi-Object Delete Success', function success() {
Bucket: bucketName, Bucket: bucketName,
Key: key, Key: key,
Body: 'somebody', Body: 'somebody',
}).promise() }).promise(),
); );
queued.push(result); queued.push(result);
return result; return result;

View File

@ -79,10 +79,10 @@ describe('Object Copy', () => {
s3 = bucketUtil.s3; s3 = bucketUtil.s3;
return bucketUtil.empty(sourceBucketName) return bucketUtil.empty(sourceBucketName)
.then(() => .then(() =>
bucketUtil.empty(destBucketName) bucketUtil.empty(destBucketName),
) )
.then(() => .then(() =>
bucketUtil.deleteMany([sourceBucketName, destBucketName]) bucketUtil.deleteMany([sourceBucketName, destBucketName]),
) )
.catch(err => { .catch(err => {
if (err.code !== 'NoSuchBucket') { if (err.code !== 'NoSuchBucket') {
@ -90,9 +90,9 @@ describe('Object Copy', () => {
throw err; throw err;
} }
}) })
.then(() => bucketUtil.createOne(sourceBucketName) .then(() => bucketUtil.createOne(sourceBucketName),
) )
.then(() => bucketUtil.createOne(destBucketName) .then(() => bucketUtil.createOne(destBucketName),
) )
.catch(err => { .catch(err => {
throw err; throw err;
@ -121,7 +121,7 @@ describe('Object Copy', () => {
})); }));
afterEach(() => bucketUtil.empty(sourceBucketName) afterEach(() => bucketUtil.empty(sourceBucketName)
.then(() => bucketUtil.empty(destBucketName)) .then(() => bucketUtil.empty(destBucketName)),
); );
after(() => bucketUtil.deleteMany([sourceBucketName, destBucketName])); after(() => bucketUtil.deleteMany([sourceBucketName, destBucketName]));
@ -179,7 +179,7 @@ describe('Object Copy', () => {
CopySource: `${sourceBucketName}/${sourceObjName}` }, CopySource: `${sourceBucketName}/${sourceObjName}` },
(err, res) => (err, res) =>
successCopyCheck(err, res, originalMetadata, successCopyCheck(err, res, originalMetadata,
destBucketName, destObjName, done) destBucketName, destObjName, done),
); );
}); });
@ -330,7 +330,7 @@ describe('Object Copy', () => {
// Should remove V4 streaming value 'aws-chunked' // Should remove V4 streaming value 'aws-chunked'
// to be compatible with AWS behavior // to be compatible with AWS behavior
assert.strictEqual(res.ContentEncoding, assert.strictEqual(res.ContentEncoding,
'base64,' 'base64,',
); );
assert.strictEqual(res.Expires.toGMTString(), assert.strictEqual(res.Expires.toGMTString(),
originalExpires.toGMTString()); originalExpires.toGMTString());
@ -346,7 +346,7 @@ describe('Object Copy', () => {
CopySource: `${sourceBucketName}/${sourceObjName}` }, CopySource: `${sourceBucketName}/${sourceObjName}` },
(err, res) => (err, res) =>
successCopyCheck(err, res, originalMetadata, successCopyCheck(err, res, originalMetadata,
sourceBucketName, destObjName, done) sourceBucketName, destObjName, done),
); );
}); });
@ -406,7 +406,7 @@ describe('Object Copy', () => {
Metadata: newMetadata }, Metadata: newMetadata },
(err, res) => (err, res) =>
successCopyCheck(err, res, newMetadata, successCopyCheck(err, res, newMetadata,
sourceBucketName, sourceObjName, done) sourceBucketName, sourceObjName, done),
); );
}); });
@ -419,7 +419,7 @@ describe('Object Copy', () => {
}, },
(err, res) => (err, res) =>
successCopyCheck(err, res, newMetadata, successCopyCheck(err, res, newMetadata,
destBucketName, destObjName, done) destBucketName, destObjName, done),
); );
}); });
@ -824,12 +824,12 @@ describe('Object Copy', () => {
const otherAccountBucket = 'otheraccountbucket42342342342'; const otherAccountBucket = 'otheraccountbucket42342342342';
const otherAccountKey = 'key'; const otherAccountKey = 'key';
beforeEach(() => otherAccountBucketUtility beforeEach(() => otherAccountBucketUtility
.createOne(otherAccountBucket) .createOne(otherAccountBucket),
); );
afterEach(() => otherAccountBucketUtility.empty(otherAccountBucket) afterEach(() => otherAccountBucketUtility.empty(otherAccountBucket)
.then(() => otherAccountBucketUtility .then(() => otherAccountBucketUtility
.deleteOne(otherAccountBucket)) .deleteOne(otherAccountBucket)),
); );
it('should not allow an account without read persmission on the ' + it('should not allow an account without read persmission on the ' +

View File

@ -46,7 +46,7 @@ describe('HEAD object, conditions', () => {
bucketUtil = new BucketUtility('default', sigCfg); bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3; s3 = bucketUtil.s3;
return bucketUtil.empty(bucketName).then(() => return bucketUtil.empty(bucketName).then(() =>
bucketUtil.deleteOne(bucketName) bucketUtil.deleteOne(bucketName),
) )
.catch(err => { .catch(err => {
if (err.code !== 'NoSuchBucket') { if (err.code !== 'NoSuchBucket') {

View File

@ -22,7 +22,7 @@ describe('HEAD object, compatibility headers [Cache-Control, ' +
bucketUtil = new BucketUtility('default', sigCfg); bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3; s3 = bucketUtil.s3;
return bucketUtil.empty(bucketName).then(() => return bucketUtil.empty(bucketName).then(() =>
bucketUtil.deleteOne(bucketName) bucketUtil.deleteOne(bucketName),
) )
.catch(err => { .catch(err => {
if (err.code !== 'NoSuchBucket') { if (err.code !== 'NoSuchBucket') {

View File

@ -38,7 +38,7 @@ describe('Put object with same key as prior object', () => {
Body: 'I am the best content ever', Body: 'I am the best content ever',
Metadata: firstPutMetadata, Metadata: firstPutMetadata,
}).promise().then(() => }).promise().then(() =>
s3.headObject({ Bucket: bucketName, Key: objectName }).promise() s3.headObject({ Bucket: bucketName, Key: objectName }).promise(),
).then(res => { ).then(res => {
assert.deepStrictEqual(res.Metadata, firstPutMetadata); assert.deepStrictEqual(res.Metadata, firstPutMetadata);
})); }));
@ -54,7 +54,7 @@ describe('Put object with same key as prior object', () => {
Body: 'Much different', Body: 'Much different',
Metadata: secondPutMetadata, Metadata: secondPutMetadata,
}).promise().then(() => }).promise().then(() =>
s3.getObject({ Bucket: bucketName, Key: objectName }).promise() s3.getObject({ Bucket: bucketName, Key: objectName }).promise(),
).then(res => { ).then(res => {
assert.deepStrictEqual(res.Metadata, secondPutMetadata); assert.deepStrictEqual(res.Metadata, secondPutMetadata);
assert.deepStrictEqual(res.Body.toString(), assert.deepStrictEqual(res.Body.toString(),

View File

@ -8,6 +8,9 @@ const changeObjectLock = require('../../../../utilities/objectLock-util');
const bucket = 'mock-bucket-lock'; const bucket = 'mock-bucket-lock';
const unlockedBucket = 'mock-bucket-no-lock'; const unlockedBucket = 'mock-bucket-no-lock';
const key = 'mock-object'; const key = 'mock-object';
const nonExistingId = process.env.AWS_ON_AIR ?
'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' :
'3939393939393939393936493939393939393939756e6437';
const mockLegalHold = { const mockLegalHold = {
empty: {}, empty: {},
@ -98,7 +101,7 @@ describe('PUT object legal hold', () => {
s3.putObjectLegalHold({ s3.putObjectLegalHold({
Bucket: bucket, Bucket: bucket,
Key: key, Key: key,
VersionId: '000000000000', VersionId: nonExistingId,
LegalHold: mockLegalHold.on, LegalHold: mockLegalHold.on,
}, err => { }, err => {
checkError(err, 'NoSuchVersion', 404); checkError(err, 'NoSuchVersion', 404);


@ -9,6 +9,9 @@ const changeObjectLock = require('../../../../utilities/objectLock-util');
const bucketName = 'lockenabledputbucket'; const bucketName = 'lockenabledputbucket';
const unlockedBucket = 'locknotenabledputbucket'; const unlockedBucket = 'locknotenabledputbucket';
const objectName = 'putobjectretentionobject'; const objectName = 'putobjectretentionobject';
const nonExistingId = process.env.AWS_ON_AIR ?
'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' :
'3939393939393939393936493939393939393939756e6437';
const retentionConfig = { const retentionConfig = {
Mode: 'GOVERNANCE', Mode: 'GOVERNANCE',
@ -79,7 +82,7 @@ describe('PUT object retention', () => {
s3.putObjectRetention({ s3.putObjectRetention({
Bucket: bucketName, Bucket: bucketName,
Key: objectName, Key: objectName,
VersionId: '000000000000', VersionId: nonExistingId,
Retention: retentionConfig, Retention: retentionConfig,
}, err => { }, err => {
checkError(err, 'NoSuchVersion', 404); checkError(err, 'NoSuchVersion', 404);
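Both the legal-hold and retention tests above now target a version id that is well-formed for the backend under test but references no stored version, so the request is expected to fail with NoSuchVersion (404) rather than being rejected as a malformed id, which a hard-coded '000000000000' presumably could be. A minimal sketch of the pattern; the identifiers are taken from the hunks above, the wrapping is illustrative:

    // Pick a syntactically valid but non-existent version id for the backend.
    const nonExistingId = process.env.AWS_ON_AIR
        ? 'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' // AWS-style id
        : '3939393939393939393936493939393939393939756e6437'; // CloudServer-style id

    s3.putObjectRetention({
        Bucket: bucketName,
        Key: objectName,
        VersionId: nonExistingId,
        Retention: retentionConfig,
    }, err => checkError(err, 'NoSuchVersion', 404));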


@ -74,7 +74,7 @@ function uploadParts(bytes, uploadId) {
PartNumber: part, PartNumber: part,
UploadId: uploadId, UploadId: uploadId,
Body: createReadStream(`${name}.mpuPart${part}`), Body: createReadStream(`${name}.mpuPart${part}`),
}).promise()) }).promise()),
); );
} }
@ -123,7 +123,7 @@ describe('aws-node-sdk range tests', () => {
}, },
], ],
}, },
}).promise()) }).promise()),
); );
afterEach(() => bucketUtil.empty(bucket) afterEach(() => bucketUtil.empty(bucket)
@ -139,7 +139,7 @@ describe('aws-node-sdk range tests', () => {
resolve(); resolve();
})) }))
.then(() => bucketUtil.deleteOne(bucket)) .then(() => bucketUtil.deleteOne(bucket))
.then(() => execAsync(`rm hashedFile.${fileSize}*`)) .then(() => execAsync(`rm hashedFile.${fileSize}*`)),
); );
it('should get a range from the first part of an object', () => it('should get a range from the first part of an object', () =>


@ -64,8 +64,8 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() {
accessKeyId: 'wrong', accessKeyId: 'wrong',
secretAccessKey: 'wrong again', secretAccessKey: 'wrong again',
}, },
sigCfg sigCfg,
) ),
); );
const expectedCode = 'InvalidAccessKeyId'; const expectedCode = 'InvalidAccessKeyId';
const expectedStatus = 403; const expectedStatus = 403;
@ -165,7 +165,7 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() {
}) })
.then(data => { .then(data => {
const buckets = data.Buckets.filter(bucket => const buckets = data.Buckets.filter(bucket =>
createdBuckets.indexOf(bucket.Name) > -1 createdBuckets.indexOf(bucket.Name) > -1,
); );
assert.equal(buckets.length, createdBuckets.length, assert.equal(buckets.length, createdBuckets.length,


@ -174,7 +174,7 @@ describe('Multi-Object Versioning Delete Success', function success() {
}); });
describe('Multi-Object Versioning Delete - deleting delete marker', describe('Multi-Object Versioning Delete - deleting delete marker',
() => { () => {
withV4(sigCfg => { withV4(sigCfg => {
const bucketUtil = new BucketUtility('default', sigCfg); const bucketUtil = new BucketUtility('default', sigCfg);
const s3 = bucketUtil.s3; const s3 = bucketUtil.s3;
@ -306,4 +306,4 @@ describe('Multi-Object Versioning Delete - deleting delete marker',
], err => done(err)); ], err => done(err));
}); });
}); });
}); });


@ -117,7 +117,7 @@ describe('Object Version Copy', () => {
lastModified = res.LastModified; lastModified = res.LastModified;
}).then(() => s3.putObject({ Bucket: sourceBucketName, }).then(() => s3.putObject({ Bucket: sourceBucketName,
Key: sourceObjName, Key: sourceObjName,
Body: secondContent }).promise()) Body: secondContent }).promise()),
); );
afterEach(done => async.parallel([ afterEach(done => async.parallel([
@ -257,7 +257,7 @@ describe('Object Version Copy', () => {
CopySource: copySource }, CopySource: copySource },
(err, res) => (err, res) =>
successCopyCheck(err, res, originalMetadata, successCopyCheck(err, res, originalMetadata,
destBucketName, destObjName, done) destBucketName, destObjName, done),
); );
}); });
@ -281,7 +281,7 @@ describe('Object Version Copy', () => {
// Should remove V4 streaming value 'aws-chunked' // Should remove V4 streaming value 'aws-chunked'
// to be compatible with AWS behavior // to be compatible with AWS behavior
assert.strictEqual(res.ContentEncoding, assert.strictEqual(res.ContentEncoding,
'base64,' 'base64,',
); );
assert.strictEqual(res.Expires.toGMTString(), assert.strictEqual(res.Expires.toGMTString(),
originalExpires.toGMTString()); originalExpires.toGMTString());
@ -297,7 +297,7 @@ describe('Object Version Copy', () => {
CopySource: copySource }, CopySource: copySource },
(err, res) => (err, res) =>
successCopyCheck(err, res, originalMetadata, successCopyCheck(err, res, originalMetadata,
sourceBucketName, destObjName, done) sourceBucketName, destObjName, done),
); );
}); });
@ -309,7 +309,7 @@ describe('Object Version Copy', () => {
Metadata: newMetadata }, Metadata: newMetadata },
(err, res) => (err, res) =>
successCopyCheck(err, res, newMetadata, successCopyCheck(err, res, newMetadata,
sourceBucketName, sourceObjName, done) sourceBucketName, sourceObjName, done),
); );
}); });
@ -322,7 +322,7 @@ describe('Object Version Copy', () => {
}, },
(err, res) => (err, res) =>
successCopyCheck(err, res, newMetadata, successCopyCheck(err, res, newMetadata,
destBucketName, destObjName, done) destBucketName, destObjName, done),
); );
}); });
@ -637,7 +637,7 @@ describe('Object Version Copy', () => {
CopySource: copySource }, CopySource: copySource },
(err, res) => (err, res) =>
successCopyCheck(err, res, originalMetadata, successCopyCheck(err, res, originalMetadata,
sourceBucketName, sourceObjName, done) sourceBucketName, sourceObjName, done),
); );
}); });
@ -749,12 +749,12 @@ describe('Object Version Copy', () => {
const otherAccountBucket = 'otheraccountbucket42342342342'; const otherAccountBucket = 'otheraccountbucket42342342342';
const otherAccountKey = 'key'; const otherAccountKey = 'key';
beforeEach(() => otherAccountBucketUtility beforeEach(() => otherAccountBucketUtility
.createOne(otherAccountBucket) .createOne(otherAccountBucket),
); );
afterEach(() => otherAccountBucketUtility.empty(otherAccountBucket) afterEach(() => otherAccountBucketUtility.empty(otherAccountBucket)
.then(() => otherAccountBucketUtility .then(() => otherAccountBucketUtility
.deleteOne(otherAccountBucket)) .deleteOne(otherAccountBucket)),
); );
it('should not allow an account without read permission on the ' + it('should not allow an account without read permission on the ' +


@ -127,6 +127,6 @@ describe('Healthcheck stats', () => {
assert.deepStrictEqual(JSON.parse(res), expectedStatsRes); assert.deepStrictEqual(JSON.parse(res), expectedStatsRes);
return done(); return done();
}); });
}, 500) }, 500),
); );
}); });


@ -432,7 +432,7 @@ describe('s3cmd copyObject without MPU to same bucket', function copyStuff() {
describe('s3cmd copyObject without MPU to different bucket ' + describe('s3cmd copyObject without MPU to different bucket ' +
'(always unencrypted)', '(always unencrypted)',
function copyStuff() { function copyStuff() {
const copyBucket = 'receiverbucket'; const copyBucket = 'receiverbucket';
this.timeout(40000); this.timeout(40000);
@ -465,7 +465,7 @@ describe('s3cmd copyObject without MPU to different bucket ' +
it('should delete copy of object', done => { it('should delete copy of object', done => {
exec(['rm', `s3://${copyBucket}/${upload}`], done); exec(['rm', `s3://${copyBucket}/${upload}`], done);
}); });
}); });
describe('s3cmd put and get object ACLs', function aclObj() { describe('s3cmd put and get object ACLs', function aclObj() {
this.timeout(60000); this.timeout(60000);


@ -1,6 +1,5 @@
'use strict'; // eslint-disable-line strict 'use strict'; // eslint-disable-line strict
const assert = require('assert'); const assert = require('assert');
const { errors } = require('arsenal');
const DummyRequestLogger = require('../unit/helpers').DummyRequestLogger; const DummyRequestLogger = require('../unit/helpers').DummyRequestLogger;
const clientCheck const clientCheck
= require('../../lib/utilities/healthcheckHandler').clientCheck; = require('../../lib/utilities/healthcheckHandler').clientCheck;
@ -71,7 +70,7 @@ describe('Healthcheck response', () => {
const azureLocationNonExistContainerError = const azureLocationNonExistContainerError =
results[azureLocationNonExistContainer].error; results[azureLocationNonExistContainer].error;
if (err) { if (err) {
assert.strictEqual(err, errors.InternalError, assert(err.is.InternalError,
`got unexpected err in clientCheck: ${err}`); `got unexpected err in clientCheck: ${err}`);
assert(azureLocationNonExistContainerError.startsWith( assert(azureLocationNonExistContainerError.startsWith(
'The specified container is being deleted.')); 'The specified container is being deleted.'));


@ -2,7 +2,7 @@ const assert = require('assert');
const async = require('async'); const async = require('async');
const AWS = require('aws-sdk'); const AWS = require('aws-sdk');
const { parseString } = require('xml2js'); const { parseString } = require('xml2js');
const { errors, models } = require('arsenal'); const { models } = require('arsenal');
const BucketInfo = models.BucketInfo; const BucketInfo = models.BucketInfo;
const { getRealAwsConfig } = const { getRealAwsConfig } =
@ -460,11 +460,12 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
abortMPU(uploadId, getAwsParams(objectKey), () => { abortMPU(uploadId, getAwsParams(objectKey), () => {
const listParams = getListParams(objectKey, uploadId); const listParams = getListParams(objectKey, uploadId);
listParts(authInfo, listParams, log, err => { listParts(authInfo, listParams, log, err => {
assert.deepStrictEqual(err, errors.ServiceUnavailable const wantedDesc = 'Error returned from AWS: ' +
.customizeDescription('Error returned from AWS: ' +
'The specified upload does not exist. The upload ID ' + 'The specified upload does not exist. The upload ID ' +
'may be invalid, or the upload may have been aborted ' + 'may be invalid, or the upload may have been aborted' +
'or completed.')); ' or completed.';
assert.strictEqual(err.is.ServiceUnavailable, true);
assert.deepStrictEqual(err.description, wantedDesc);
done(); done();
}); });
}); });
@ -513,7 +514,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
const fakeKey = `key-${Date.now()}`; const fakeKey = `key-${Date.now()}`;
const delParams = getDeleteParams(fakeKey, fakeUploadId); const delParams = getDeleteParams(fakeKey, fakeUploadId);
multipartDelete(authInfo, delParams, log, err => { multipartDelete(authInfo, delParams, log, err => {
assert.equal(err, errors.NoSuchUpload, assert.strictEqual(err.is.NoSuchUpload, true,
`Error aborting MPU: ${err}`); `Error aborting MPU: ${err}`);
done(); done();
}); });
@ -639,7 +640,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
const compParams = getCompleteParams(objectKey, uploadId); const compParams = getCompleteParams(objectKey, uploadId);
compParams.post = errorBody; compParams.post = errorBody;
completeMultipartUpload(authInfo, compParams, log, err => { completeMultipartUpload(authInfo, compParams, log, err => {
assert.deepStrictEqual(err, errors.InvalidPart); assert.strictEqual(err.is.InvalidPart, true);
done(); done();
}); });
}); });
@ -661,7 +662,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
const compParams = getCompleteParams(objectKey, uploadId); const compParams = getCompleteParams(objectKey, uploadId);
compParams.post = errorBody; compParams.post = errorBody;
completeMultipartUpload(authInfo, compParams, log, err => { completeMultipartUpload(authInfo, compParams, log, err => {
assert.deepStrictEqual(err, errors.InvalidPartOrder); assert.strictEqual(err.is.InvalidPartOrder, true);
done(); done();
}); });
}); });
@ -687,7 +688,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
const compParams = getCompleteParams(objectKey, uploadId); const compParams = getCompleteParams(objectKey, uploadId);
compParams.post = errorBody; compParams.post = errorBody;
completeMultipartUpload(authInfo, compParams, log, err => { completeMultipartUpload(authInfo, compParams, log, err => {
assert.deepStrictEqual(err, errors.EntityTooSmall); assert.strictEqual(err.is.EntityTooSmall, true);
done(); done();
}); });
}); });
@ -825,7 +826,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
(uploadId, next) => { (uploadId, next) => {
const listParams = getListParams(objectKey, uploadId); const listParams = getListParams(objectKey, uploadId);
listParts(authInfo, listParams, log, err => { listParts(authInfo, listParams, log, err => {
assert(err.NoSuchUpload); assert.strictEqual(err.is.NoSuchUpload, true);
next(); next();
}); });
}, },
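The assertions in this suite (and in the healthcheck test above) stop deep-comparing the returned error against a shared Arsenal error instance and instead check the error's type through its `is` map, asserting any customized description separately. A small helper in the same spirit, assuming only what the updated assertions already rely on (an `is` map keyed by error name and a `description` field); the helper name is illustrative:

    // Assert that an Arsenal-style error has the expected type and,
    // optionally, the expected (customized) description.
    function assertArsenalError(err, type, description) {
        assert.strictEqual(err.is[type], true, `expected ${type}, got: ${err}`);
        if (description !== undefined) {
            assert.deepStrictEqual(err.description, description);
        }
    }

    // e.g. assertArsenalError(err, 'NoSuchUpload');
    //      assertArsenalError(err, 'ServiceUnavailable', wantedDesc);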


@ -58,7 +58,7 @@ function put(bucketLoc, objLoc, requestHost, cb, errorDescription) {
resHeaders) => { resHeaders) => {
if (errorDescription) { if (errorDescription) {
assert.strictEqual(err.code, 400); assert.strictEqual(err.code, 400);
assert(err.InvalidArgument); assert(err.is.InvalidArgument);
assert(err.description.indexOf(errorDescription) > -1); assert(err.description.indexOf(errorDescription) > -1);
} else { } else {
assert.strictEqual(err, null, `Error putting object: ${err}`); assert.strictEqual(err, null, `Error putting object: ${err}`);


@ -2,6 +2,7 @@ const assert = require('assert');
const async = require('async'); const async = require('async');
const { parseString } = require('xml2js'); const { parseString } = require('xml2js');
const AWS = require('aws-sdk'); const AWS = require('aws-sdk');
const { errors } = require('arsenal');
const { cleanup, DummyRequestLogger, makeAuthInfo } const { cleanup, DummyRequestLogger, makeAuthInfo }
= require('../unit/helpers'); = require('../unit/helpers');
@ -56,7 +57,7 @@ function getAwsParamsBucketMismatch(destObjName, uploadId) {
} }
function copyPutPart(bucketLoc, mpuLoc, srcObjLoc, requestHost, cb, function copyPutPart(bucketLoc, mpuLoc, srcObjLoc, requestHost, cb,
errorPutCopyPart) { errorPutCopyPart) {
const keys = getSourceAndDestKeys(); const keys = getSourceAndDestKeys();
const { sourceObjName, destObjName } = keys; const { sourceObjName, destObjName } = keys;
const post = bucketLoc ? '<?xml version="1.0" encoding="UTF-8"?>' + const post = bucketLoc ? '<?xml version="1.0" encoding="UTF-8"?>' +
@ -147,8 +148,7 @@ errorPutCopyPart) {
return objectPutCopyPart(authInfo, copyPartReq, return objectPutCopyPart(authInfo, copyPartReq,
bucketName, sourceObjName, undefined, log, (err, copyResult) => { bucketName, sourceObjName, undefined, log, (err, copyResult) => {
if (errorPutCopyPart) { if (errorPutCopyPart) {
assert.strictEqual(err.code, errorPutCopyPart.statusCode); assert.strictEqual(err.is[errorPutCopyPart.type], true);
assert(err[errorPutCopyPart.code]);
return cb(); return cb();
} }
assert.strictEqual(err, null); assert.strictEqual(err, null);
@ -172,7 +172,7 @@ function assertPartList(partList, uploadId) {
} }
describeSkipIfE2E('ObjectCopyPutPart API with multiple backends', describeSkipIfE2E('ObjectCopyPutPart API with multiple backends',
function testSuite() { function testSuite() {
this.timeout(60000); this.timeout(60000);
beforeEach(() => { beforeEach(() => {
@ -293,9 +293,8 @@ function testSuite() {
it('should return error 403 AccessDenied copying part to a ' + it('should return error 403 AccessDenied copying part to a ' +
'different AWS location without object READ access', 'different AWS location without object READ access',
done => { done => {
const errorPutCopyPart = { code: 'AccessDenied', statusCode: 403 };
copyPutPart(null, awsLocation, awsLocation2, 'localhost', done, copyPutPart(null, awsLocation, awsLocation2, 'localhost', done,
errorPutCopyPart); errors.AccessDenied);
}); });
@ -305,4 +304,4 @@ function testSuite() {
done(); done();
}); });
}); });
}); });
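In the ObjectCopyPutPart suite the expected-error argument changes shape as well: instead of an ad-hoc `{ code: 'AccessDenied', statusCode: 403 }` literal, the caller passes an Arsenal error object (hence the added `const { errors } = require('arsenal')`), and the callback indexes `err.is` with that object's `type`. A usage sketch built only from names visible in the hunks above:

    // errorPutCopyPart is now an Arsenal error whose `type` names the error,
    // so the callback can assert: err.is[errorPutCopyPart.type] === true.
    copyPutPart(null, awsLocation, awsLocation2, 'localhost', done,
        errors.AccessDenied);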

Some files were not shown because too many files have changed in this diff.