Compare commits
9 Commits
developmen
...
improvemen
Author | SHA1 | Date |
---|---|---|
Jordi Bertran de Balanda | 9b92a50ca7 | |
Jordi Bertran de Balanda | a6230f5538 | |
Ronnie Smith | 3bfcf624bf | |
Jordi Bertran de Balanda | bd9209ef5e | |
Jordi Bertran de Balanda | 371cb689af | |
Xin LI | 674860ef8a | |
Xin LI | ce28e08d3e | |
Xin LI | 67df4fa207 | |
Xin LI | 4100ac73b2 |
|
@ -1 +1,6 @@
|
||||||
{ "extends": "scality" }
|
{
|
||||||
|
"extends": "scality",
|
||||||
|
"parserOptions": {
|
||||||
|
"ecmaVersion": 2020
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
|
@ -924,7 +924,7 @@ class Config extends EventEmitter {
|
||||||
`bad config: utapi.filter.${state}.${field} must be an array of strings`);
|
`bad config: utapi.filter.${state}.${field} must be an array of strings`);
|
||||||
utapiResourceFilters[field] = { [state]: new Set(resources) };
|
utapiResourceFilters[field] = { [state]: new Set(resources) };
|
||||||
}
|
}
|
||||||
}
|
},
|
||||||
));
|
));
|
||||||
this.utapi.filter = utapiResourceFilters;
|
this.utapi.filter = utapiResourceFilters;
|
||||||
}
|
}
|
||||||
|
@ -1088,7 +1088,7 @@ class Config extends EventEmitter {
|
||||||
typeof config.certFilePaths.cert === 'string' && ((
|
typeof config.certFilePaths.cert === 'string' && ((
|
||||||
config.certFilePaths.ca &&
|
config.certFilePaths.ca &&
|
||||||
typeof config.certFilePaths.ca === 'string') ||
|
typeof config.certFilePaths.ca === 'string') ||
|
||||||
!config.certFilePaths.ca)
|
!config.certFilePaths.ca),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
const { key, cert, ca } = config.certFilePaths ?
|
const { key, cert, ca } = config.certFilePaths ?
|
||||||
|
@ -1174,7 +1174,7 @@ class Config extends EventEmitter {
|
||||||
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
|
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
|
||||||
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
|
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
|
||||||
'bad environment variable: S3BACKEND environment variable ' +
|
'bad environment variable: S3BACKEND environment variable ' +
|
||||||
'should be one of mem/file/scality/cdmi'
|
'should be one of mem/file/scality/cdmi',
|
||||||
);
|
);
|
||||||
auth = process.env.S3BACKEND;
|
auth = process.env.S3BACKEND;
|
||||||
data = process.env.S3BACKEND;
|
data = process.env.S3BACKEND;
|
||||||
|
@ -1209,7 +1209,7 @@ class Config extends EventEmitter {
|
||||||
const validData = ['mem', 'file', 'scality', 'multiple'];
|
const validData = ['mem', 'file', 'scality', 'multiple'];
|
||||||
assert(validData.indexOf(process.env.S3DATA) > -1,
|
assert(validData.indexOf(process.env.S3DATA) > -1,
|
||||||
'bad environment variable: S3DATA environment variable ' +
|
'bad environment variable: S3DATA environment variable ' +
|
||||||
'should be one of mem/file/scality/multiple'
|
'should be one of mem/file/scality/multiple',
|
||||||
);
|
);
|
||||||
data = process.env.S3DATA;
|
data = process.env.S3DATA;
|
||||||
}
|
}
|
||||||
|
@ -1218,7 +1218,7 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
assert(this.locationConstraints !== undefined &&
|
assert(this.locationConstraints !== undefined &&
|
||||||
this.restEndpoints !== undefined,
|
this.restEndpoints !== undefined,
|
||||||
'bad config: locationConstraints and restEndpoints must be set'
|
'bad config: locationConstraints and restEndpoints must be set',
|
||||||
);
|
);
|
||||||
|
|
||||||
if (process.env.S3METADATA) {
|
if (process.env.S3METADATA) {
|
||||||
|
|
|
@ -49,7 +49,7 @@ function updateRequestContexts(request, requestContexts, apiMethod, log, cb) {
|
||||||
return metadata.getObjectMD(bucketName, objectKey, { versionId: reqVersionId }, log,
|
return metadata.getObjectMD(bucketName, objectKey, { versionId: reqVersionId }, log,
|
||||||
(err, objMD) => {
|
(err, objMD) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
if (err.NoSuchKey) {
|
if (err.is.NoSuchKey) {
|
||||||
return next();
|
return next();
|
||||||
}
|
}
|
||||||
log.trace('error getting request object tags');
|
log.trace('error getting request object tags');
|
||||||
|
|
|
@ -22,7 +22,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
|
||||||
|
|
||||||
// Get new format usersBucket to see if it exists
|
// Get new format usersBucket to see if it exists
|
||||||
return metadata.getBucket(usersBucket, log, (err, usersBucketAttrs) => {
|
return metadata.getBucket(usersBucket, log, (err, usersBucketAttrs) => {
|
||||||
if (err && !err.NoSuchBucket && !err.BucketAlreadyExists) {
|
if (err && !err.is.NoSuchBucket && !err.is.BucketAlreadyExists) {
|
||||||
return cb(err);
|
return cb(err);
|
||||||
}
|
}
|
||||||
const splitter = usersBucketAttrs ?
|
const splitter = usersBucketAttrs ?
|
||||||
|
@ -36,7 +36,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
|
||||||
usersBucket : oldUsersBucket;
|
usersBucket : oldUsersBucket;
|
||||||
return metadata.putObjectMD(usersBucketBeingCalled, key,
|
return metadata.putObjectMD(usersBucketBeingCalled, key,
|
||||||
omVal, {}, log, err => {
|
omVal, {}, log, err => {
|
||||||
if (err && err.NoSuchBucket) {
|
if (err?.is.NoSuchBucket) {
|
||||||
// There must be no usersBucket so createBucket
|
// There must be no usersBucket so createBucket
|
||||||
// one using the new format
|
// one using the new format
|
||||||
log.trace('users bucket does not exist, ' +
|
log.trace('users bucket does not exist, ' +
|
||||||
|
@ -57,8 +57,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
|
||||||
// error with respect
|
// error with respect
|
||||||
// to the usersBucket.
|
// to the usersBucket.
|
||||||
if (err &&
|
if (err &&
|
||||||
err !==
|
!err.is.BucketAlreadyExists) {
|
||||||
errors.BucketAlreadyExists) {
|
|
||||||
log.error('error from metadata', {
|
log.error('error from metadata', {
|
||||||
error: err,
|
error: err,
|
||||||
});
|
});
|
||||||
|
@ -206,7 +205,7 @@ function createBucket(authInfo, bucketName, headers,
|
||||||
},
|
},
|
||||||
getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) {
|
getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) {
|
||||||
metadata.getBucket(bucketName, log, (err, data) => {
|
metadata.getBucket(bucketName, log, (err, data) => {
|
||||||
if (err && err.NoSuchBucket) {
|
if (err?.is.NoSuchBucket) {
|
||||||
return callback(null, 'NoBucketYet');
|
return callback(null, 'NoBucketYet');
|
||||||
}
|
}
|
||||||
if (err) {
|
if (err) {
|
||||||
|
|
|
@ -16,7 +16,7 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
|
||||||
`${mpuBucketPrefix}${destinationBucketName}`;
|
`${mpuBucketPrefix}${destinationBucketName}`;
|
||||||
return metadata.deleteBucket(mpuBucketName, log, err => {
|
return metadata.deleteBucket(mpuBucketName, log, err => {
|
||||||
// If the mpu bucket does not exist, just move on
|
// If the mpu bucket does not exist, just move on
|
||||||
if (err && err.NoSuchBucket) {
|
if (err?.is.NoSuchBucket) {
|
||||||
return cb();
|
return cb();
|
||||||
}
|
}
|
||||||
return cb(err);
|
return cb(err);
|
||||||
|
@ -90,7 +90,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
|
||||||
log, (err, objectsListRes) => {
|
log, (err, objectsListRes) => {
|
||||||
// If no shadow bucket ever created, no ongoing MPU's, so
|
// If no shadow bucket ever created, no ongoing MPU's, so
|
||||||
// continue with deletion
|
// continue with deletion
|
||||||
if (err && err.NoSuchBucket) {
|
if (err?.is.NoSuchBucket) {
|
||||||
return next();
|
return next();
|
||||||
}
|
}
|
||||||
if (err) {
|
if (err) {
|
||||||
|
|
|
@ -11,16 +11,16 @@ function deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
|
||||||
metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
|
metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
|
||||||
// If the object representing the bucket is not in the
|
// If the object representing the bucket is not in the
|
||||||
// users bucket just continue
|
// users bucket just continue
|
||||||
if (error && error.NoSuchKey) {
|
if (error?.is.NoSuchKey) {
|
||||||
return cb(null);
|
return cb(null);
|
||||||
// BACKWARDS COMPATIBILITY: Remove this once no longer
|
// BACKWARDS COMPATIBILITY: Remove this once no longer
|
||||||
// have old user bucket format
|
// have old user bucket format
|
||||||
} else if (error && error.NoSuchBucket) {
|
} else if (error?.is.NoSuchBucket) {
|
||||||
const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
|
const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
|
||||||
oldSplitter, bucketName);
|
oldSplitter, bucketName);
|
||||||
return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
|
return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
|
||||||
{}, log, error => {
|
{}, log, error => {
|
||||||
if (error && !error.NoSuchKey) {
|
if (error && !error.is.NoSuchKey) {
|
||||||
log.error('from metadata while deleting user bucket',
|
log.error('from metadata while deleting user bucket',
|
||||||
{ error });
|
{ error });
|
||||||
return cb(error);
|
return cb(error);
|
||||||
|
|
|
@ -239,7 +239,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
if (err) {
|
if (err) {
|
||||||
// TODO: check AWS error when user requested a specific
|
// TODO: check AWS error when user requested a specific
|
||||||
// version before any versions have been put
|
// version before any versions have been put
|
||||||
const logLvl = err === errors.BadRequest ?
|
const logLvl = err.is.BadRequest ?
|
||||||
'debug' : 'error';
|
'debug' : 'error';
|
||||||
log[logLvl]('error getting versioning info', {
|
log[logLvl]('error getting versioning info', {
|
||||||
error: err,
|
error: err,
|
||||||
|
|
|
@ -80,7 +80,7 @@ function _generateExpHeadresMPU(rules, params, datetime) {
|
||||||
const date = calculateDate(
|
const date = calculateDate(
|
||||||
params.date,
|
params.date,
|
||||||
rule.DaysAfterInitiation,
|
rule.DaysAfterInitiation,
|
||||||
datetime
|
datetime,
|
||||||
);
|
);
|
||||||
|
|
||||||
return {
|
return {
|
||||||
|
|
|
@ -0,0 +1,48 @@
|
||||||
|
const { errors } = require('arsenal');
|
||||||
|
|
||||||
|
const { config } = require('../../../Config');
|
||||||
|
const { getLocationMetric, pushLocationMetric } =
|
||||||
|
require('../../../utapi/utilities');
|
||||||
|
|
||||||
|
function _gbToBytes(gb) {
|
||||||
|
return gb * 1024 * 1024 * 1024;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* locationStorageCheck - will ensure there is enough space left for object on
|
||||||
|
* PUT operations, or will update metric on DELETE
|
||||||
|
* NOTE: storage limit may not be exactly enforced in the case of concurrent
|
||||||
|
* requests when near limit
|
||||||
|
* @param {string} location - name of location to check quota
|
||||||
|
* @param {number} updateSize - new size to check against quota in bytes
|
||||||
|
* @param {object} log - werelogs logger
|
||||||
|
* @param {function} cb - callback function
|
||||||
|
* @return {undefined}
|
||||||
|
*/
|
||||||
|
function locationStorageCheck(location, updateSize, log, cb) {
|
||||||
|
const lc = config.locationConstraints;
|
||||||
|
const sizeLimitGB = lc[location] ? lc[location].sizeLimitGB : undefined;
|
||||||
|
if (updateSize === 0 || sizeLimitGB === undefined || sizeLimitGB === null) {
|
||||||
|
return cb();
|
||||||
|
}
|
||||||
|
// no need to list location metric, since it should be decreased
|
||||||
|
if (updateSize < 0) {
|
||||||
|
return pushLocationMetric(location, updateSize, log, cb);
|
||||||
|
}
|
||||||
|
return getLocationMetric(location, log, (err, bytesStored) => {
|
||||||
|
if (err) {
|
||||||
|
log.error(`Error listing metrics from Utapi: ${err.message}`);
|
||||||
|
return cb(err);
|
||||||
|
}
|
||||||
|
const newStorageSize = parseInt(bytesStored, 10) + updateSize;
|
||||||
|
const sizeLimitBytes = _gbToBytes(sizeLimitGB);
|
||||||
|
if (sizeLimitBytes < newStorageSize) {
|
||||||
|
return cb(errors.AccessDenied.customizeDescription(
|
||||||
|
`The assigned storage space limit for location ${location} ` +
|
||||||
|
'will be exceeded'));
|
||||||
|
}
|
||||||
|
return pushLocationMetric(location, updateSize, log, cb);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = locationStorageCheck;
|
|
@ -43,7 +43,7 @@ function validateHeaders(bucket, headers, log) {
|
||||||
!(objectLockMode && objectLockDate)) {
|
!(objectLockMode && objectLockDate)) {
|
||||||
return errors.InvalidArgument.customizeDescription(
|
return errors.InvalidArgument.customizeDescription(
|
||||||
'x-amz-object-lock-retain-until-date and ' +
|
'x-amz-object-lock-retain-until-date and ' +
|
||||||
'x-amz-object-lock-mode must both be supplied'
|
'x-amz-object-lock-mode must both be supplied',
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);
|
const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);
|
||||||
|
|
|
@ -2,7 +2,7 @@ const { errors } = require('arsenal');
|
||||||
const {
|
const {
|
||||||
parseRangeSpec,
|
parseRangeSpec,
|
||||||
parseRange,
|
parseRange,
|
||||||
} = require('arsenal/lib/network/http/utils');
|
} = require('arsenal').network.http.utils;
|
||||||
|
|
||||||
const constants = require('../../../../constants');
|
const constants = require('../../../../constants');
|
||||||
const setPartRanges = require('./setPartRanges');
|
const setPartRanges = require('./setPartRanges');
|
||||||
|
|
|
@ -292,7 +292,7 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
|
||||||
// it's possible there was a concurrent request to
|
// it's possible there was a concurrent request to
|
||||||
// delete the null version, so proceed with putting a
|
// delete the null version, so proceed with putting a
|
||||||
// new version
|
// new version
|
||||||
if (err === errors.NoSuchKey) {
|
if (err.is.NoSuchKey) {
|
||||||
return next(null, options);
|
return next(null, options);
|
||||||
}
|
}
|
||||||
return next(errors.InternalError);
|
return next(errors.InternalError);
|
||||||
|
|
|
@ -113,7 +113,7 @@ function processVersions(bucketName, listParams, list, encType) {
|
||||||
xml.push(
|
xml.push(
|
||||||
'<?xml version="1.0" encoding="UTF-8"?>',
|
'<?xml version="1.0" encoding="UTF-8"?>',
|
||||||
'<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
|
'<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
|
||||||
'<Name>', bucketName, '</Name>'
|
'<Name>', bucketName, '</Name>',
|
||||||
);
|
);
|
||||||
const isTruncated = list.IsTruncated ? 'true' : 'false';
|
const isTruncated = list.IsTruncated ? 'true' : 'false';
|
||||||
const xmlParams = [
|
const xmlParams = [
|
||||||
|
@ -160,7 +160,7 @@ function processVersions(bucketName, listParams, list, encType) {
|
||||||
`<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
|
`<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
|
||||||
'</Owner>',
|
'</Owner>',
|
||||||
`<StorageClass>${v.StorageClass}</StorageClass>`,
|
`<StorageClass>${v.StorageClass}</StorageClass>`,
|
||||||
v.IsDeleteMarker ? '</DeleteMarker>' : '</Version>'
|
v.IsDeleteMarker ? '</DeleteMarker>' : '</Version>',
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
list.CommonPrefixes.forEach(item => {
|
list.CommonPrefixes.forEach(item => {
|
||||||
|
@ -176,7 +176,7 @@ function processMasterVersions(bucketName, listParams, list) {
|
||||||
xml.push(
|
xml.push(
|
||||||
'<?xml version="1.0" encoding="UTF-8"?>',
|
'<?xml version="1.0" encoding="UTF-8"?>',
|
||||||
'<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
|
'<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
|
||||||
'<Name>', bucketName, '</Name>'
|
'<Name>', bucketName, '</Name>',
|
||||||
);
|
);
|
||||||
const isTruncated = list.IsTruncated ? 'true' : 'false';
|
const isTruncated = list.IsTruncated ? 'true' : 'false';
|
||||||
const xmlParams = [
|
const xmlParams = [
|
||||||
|
@ -234,19 +234,19 @@ function processMasterVersions(bucketName, listParams, list) {
|
||||||
`<Key>${objectKey}</Key>`,
|
`<Key>${objectKey}</Key>`,
|
||||||
`<LastModified>${v.LastModified}</LastModified>`,
|
`<LastModified>${v.LastModified}</LastModified>`,
|
||||||
`<ETag>"${v.ETag}"</ETag>`,
|
`<ETag>"${v.ETag}"</ETag>`,
|
||||||
`<Size>${v.Size}</Size>`
|
`<Size>${v.Size}</Size>`,
|
||||||
);
|
);
|
||||||
if (!listParams.v2 || listParams.fetchOwner) {
|
if (!listParams.v2 || listParams.fetchOwner) {
|
||||||
xml.push(
|
xml.push(
|
||||||
'<Owner>',
|
'<Owner>',
|
||||||
`<ID>${v.Owner.ID}</ID>`,
|
`<ID>${v.Owner.ID}</ID>`,
|
||||||
`<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
|
`<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
|
||||||
'</Owner>'
|
'</Owner>',
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
return xml.push(
|
return xml.push(
|
||||||
`<StorageClass>${v.StorageClass}</StorageClass>`,
|
`<StorageClass>${v.StorageClass}</StorageClass>`,
|
||||||
'</Contents>'
|
'</Contents>',
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
list.CommonPrefixes.forEach(item => {
|
list.CommonPrefixes.forEach(item => {
|
||||||
|
|
|
@ -67,7 +67,7 @@ function bucketGetEncryption(authInfo, request, log, callback) {
|
||||||
'</ApplyServerSideEncryptionByDefault>',
|
'</ApplyServerSideEncryptionByDefault>',
|
||||||
'<BucketKeyEnabled>false</BucketKeyEnabled>',
|
'<BucketKeyEnabled>false</BucketKeyEnabled>',
|
||||||
'</Rule>',
|
'</Rule>',
|
||||||
'</ServerSideEncryptionConfiguration>'
|
'</ServerSideEncryptionConfiguration>',
|
||||||
);
|
);
|
||||||
|
|
||||||
pushMetric('getBucketEncryption', log, {
|
pushMetric('getBucketEncryption', log, {
|
||||||
|
|
|
@ -20,7 +20,7 @@ function convertToXml(versioningConfiguration) {
|
||||||
|
|
||||||
xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
|
xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
|
||||||
'<VersioningConfiguration ' +
|
'<VersioningConfiguration ' +
|
||||||
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
|
'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
|
||||||
);
|
);
|
||||||
|
|
||||||
if (versioningConfiguration && versioningConfiguration.Status) {
|
if (versioningConfiguration && versioningConfiguration.Status) {
|
||||||
|
|
|
@ -341,7 +341,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
|
||||||
if (err) {
|
if (err) {
|
||||||
// TODO: check AWS error when user requested a specific
|
// TODO: check AWS error when user requested a specific
|
||||||
// version before any versions have been put
|
// version before any versions have been put
|
||||||
const logLvl = err === errors.BadRequest ?
|
const logLvl = err.is.BadRequest ?
|
||||||
'debug' : 'error';
|
'debug' : 'error';
|
||||||
log[logLvl]('error getting versioning info', {
|
log[logLvl]('error getting versioning info', {
|
||||||
error: err,
|
error: err,
|
||||||
|
|
|
@ -326,7 +326,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
|
||||||
return next(error, corsHeaders);
|
return next(error, corsHeaders);
|
||||||
}
|
}
|
||||||
return next(null, corsHeaders, destinationBucket, objectSSEConfig);
|
return next(null, corsHeaders, destinationBucket, objectSSEConfig);
|
||||||
}
|
},
|
||||||
),
|
),
|
||||||
],
|
],
|
||||||
(error, corsHeaders, destinationBucket, objectSSEConfig) => {
|
(error, corsHeaders, destinationBucket, objectSSEConfig) => {
|
||||||
|
@ -334,7 +334,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
|
||||||
return callback(error, null, corsHeaders);
|
return callback(error, null, corsHeaders);
|
||||||
}
|
}
|
||||||
return _storetheMPObject(destinationBucket, corsHeaders, objectSSEConfig);
|
return _storetheMPObject(destinationBucket, corsHeaders, objectSSEConfig);
|
||||||
}
|
},
|
||||||
);
|
);
|
||||||
return undefined;
|
return undefined;
|
||||||
}
|
}
|
||||||
|
|
|
@ -245,7 +245,7 @@ function listParts(authInfo, request, log, callback) {
|
||||||
xml.push(
|
xml.push(
|
||||||
'<?xml version="1.0" encoding="UTF-8"?>',
|
'<?xml version="1.0" encoding="UTF-8"?>',
|
||||||
'<ListPartsResult xmlns="http://s3.amazonaws.com/doc/' +
|
'<ListPartsResult xmlns="http://s3.amazonaws.com/doc/' +
|
||||||
'2006-03-01/">'
|
'2006-03-01/">',
|
||||||
);
|
);
|
||||||
buildXML([
|
buildXML([
|
||||||
{ tag: 'Bucket', value: bucketName },
|
{ tag: 'Bucket', value: bucketName },
|
||||||
|
|
|
@ -84,7 +84,7 @@ function _formatXML(quietSetting, errorResults, deleted) {
|
||||||
'<Message>',
|
'<Message>',
|
||||||
escapeForXml(errorObj.error.description),
|
escapeForXml(errorObj.error.description),
|
||||||
'</Message>',
|
'</Message>',
|
||||||
'</Error>'
|
'</Error>',
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
errorXML = errorXML.join('');
|
errorXML = errorXML.join('');
|
||||||
|
@ -110,13 +110,13 @@ function _formatXML(quietSetting, errorResults, deleted) {
|
||||||
'<Deleted>',
|
'<Deleted>',
|
||||||
'<Key>',
|
'<Key>',
|
||||||
escapeForXml(version.entry.key),
|
escapeForXml(version.entry.key),
|
||||||
'</Key>'
|
'</Key>',
|
||||||
);
|
);
|
||||||
if (version.entry.versionId) {
|
if (version.entry.versionId) {
|
||||||
deletedXML.push(
|
deletedXML.push(
|
||||||
'<VersionId>',
|
'<VersionId>',
|
||||||
escapeForXml(version.entry.versionId),
|
escapeForXml(version.entry.versionId),
|
||||||
'</VersionId>'
|
'</VersionId>',
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
if (isDeleteMarker) {
|
if (isDeleteMarker) {
|
||||||
|
@ -126,7 +126,7 @@ function _formatXML(quietSetting, errorResults, deleted) {
|
||||||
'</DeleteMarker>',
|
'</DeleteMarker>',
|
||||||
'<DeleteMarkerVersionId>',
|
'<DeleteMarkerVersionId>',
|
||||||
deleteMarkerVersionId,
|
deleteMarkerVersionId,
|
||||||
'</DeleteMarkerVersionId>'
|
'</DeleteMarkerVersionId>',
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
deletedXML.push('</Deleted>');
|
deletedXML.push('</Deleted>');
|
||||||
|
@ -210,10 +210,10 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
|
||||||
(versionId, callback) => metadataGetObject(bucketName, entry.key,
|
(versionId, callback) => metadataGetObject(bucketName, entry.key,
|
||||||
versionId, log, (err, objMD) => {
|
versionId, log, (err, objMD) => {
|
||||||
// if general error from metadata return error
|
// if general error from metadata return error
|
||||||
if (err && !err.NoSuchKey) {
|
if (err && !err.is.NoSuchKey) {
|
||||||
return callback(err);
|
return callback(err);
|
||||||
}
|
}
|
||||||
if (err && err.NoSuchKey) {
|
if (err?.is.NoSuchKey) {
|
||||||
const verCfg = bucket.getVersioningConfiguration();
|
const verCfg = bucket.getVersioningConfiguration();
|
||||||
// To adhere to AWS behavior, create a delete marker
|
// To adhere to AWS behavior, create a delete marker
|
||||||
// if trying to delete an object that does not exist
|
// if trying to delete an object that does not exist
|
||||||
|
@ -386,7 +386,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
||||||
return vault.checkPolicies(requestContextParams, authInfo.getArn(),
|
return vault.checkPolicies(requestContextParams, authInfo.getArn(),
|
||||||
log, (err, authorizationResults) => {
|
log, (err, authorizationResults) => {
|
||||||
// there were no policies so received a blanket AccessDenied
|
// there were no policies so received a blanket AccessDenied
|
||||||
if (err && err.AccessDenied) {
|
if (err?.is.AccessDenied) {
|
||||||
objects.forEach(entry => {
|
objects.forEach(entry => {
|
||||||
errorResults.push({
|
errorResults.push({
|
||||||
entry,
|
entry,
|
||||||
|
|
|
@ -1,5 +1,3 @@
|
||||||
const { errors } = require('arsenal');
|
|
||||||
|
|
||||||
const abortMultipartUpload = require('./apiUtils/object/abortMultipartUpload');
|
const abortMultipartUpload = require('./apiUtils/object/abortMultipartUpload');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
const isLegacyAWSBehavior = require('../utilities/legacyAWSBehavior');
|
const isLegacyAWSBehavior = require('../utilities/legacyAWSBehavior');
|
||||||
|
@ -29,10 +27,10 @@ function multipartDelete(authInfo, request, log, callback) {
|
||||||
request.method, destinationBucket);
|
request.method, destinationBucket);
|
||||||
const location = destinationBucket ?
|
const location = destinationBucket ?
|
||||||
destinationBucket.getLocationConstraint() : null;
|
destinationBucket.getLocationConstraint() : null;
|
||||||
if (err && err !== errors.NoSuchUpload) {
|
if (err && !err.is.NoSuchUpload) {
|
||||||
return callback(err, corsHeaders);
|
return callback(err, corsHeaders);
|
||||||
}
|
}
|
||||||
if (err === errors.NoSuchUpload && isLegacyAWSBehavior(location)) {
|
if (err?.is.NoSuchUpload && isLegacyAWSBehavior(location)) {
|
||||||
log.trace('did not find valid mpu with uploadId', {
|
log.trace('did not find valid mpu with uploadId', {
|
||||||
method: 'multipartDelete',
|
method: 'multipartDelete',
|
||||||
uploadId,
|
uploadId,
|
||||||
|
|
|
@ -1,5 +1,5 @@
|
||||||
const { errors, s3middleware } = require('arsenal');
|
const { errors, s3middleware } = require('arsenal');
|
||||||
const { parseRange } = require('arsenal/lib/network/http/utils');
|
const { parseRange } = require('arsenal').network.http.utils;
|
||||||
|
|
||||||
const data = require('../data/wrapper');
|
const data = require('../data/wrapper');
|
||||||
|
|
||||||
|
|
|
@ -1,6 +1,6 @@
|
||||||
const { errors, s3middleware } = require('arsenal');
|
const { errors, s3middleware } = require('arsenal');
|
||||||
const validateHeaders = s3middleware.validateConditionalHeaders;
|
const validateHeaders = s3middleware.validateConditionalHeaders;
|
||||||
const { parseRange } = require('arsenal/lib/network/http/utils');
|
const { parseRange } = require('arsenal').network.http.utils;
|
||||||
|
|
||||||
const { decodeVersionId } = require('./apiUtils/object/versioning');
|
const { decodeVersionId } = require('./apiUtils/object/versioning');
|
||||||
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
|
||||||
|
|
|
@ -101,7 +101,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
|
||||||
return next(invalidSSEError);
|
return next(invalidSSEError);
|
||||||
}
|
}
|
||||||
return next(null, sseConfig);
|
return next(null, sseConfig);
|
||||||
}
|
},
|
||||||
);
|
);
|
||||||
},
|
},
|
||||||
function createCipherBundle(serverSideEncryptionConfig, next) {
|
function createCipherBundle(serverSideEncryptionConfig, next) {
|
||||||
|
|
|
@ -182,7 +182,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
sourceLocationConstraintName, next) {
|
sourceLocationConstraintName, next) {
|
||||||
return metadata.getBucket(mpuBucketName, log,
|
return metadata.getBucket(mpuBucketName, log,
|
||||||
(err, mpuBucket) => {
|
(err, mpuBucket) => {
|
||||||
if (err && err.NoSuchBucket) {
|
if (err?.is.NoSuchBucket) {
|
||||||
return next(errors.NoSuchUpload);
|
return next(errors.NoSuchUpload);
|
||||||
}
|
}
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -211,7 +211,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
return metadata.getObjectMD(mpuBucketName, mpuOverviewKey,
|
return metadata.getObjectMD(mpuBucketName, mpuOverviewKey,
|
||||||
null, log, (err, res) => {
|
null, log, (err, res) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
if (err.NoSuchKey) {
|
if (err.is.NoSuchKey) {
|
||||||
return next(errors.NoSuchUpload);
|
return next(errors.NoSuchUpload);
|
||||||
}
|
}
|
||||||
log.error('error getting overview object from ' +
|
log.error('error getting overview object from ' +
|
||||||
|
@ -263,7 +263,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
|
||||||
metadata.getObjectMD(mpuBucketName, partKey, {}, log,
|
metadata.getObjectMD(mpuBucketName, partKey, {}, log,
|
||||||
(err, result) => {
|
(err, result) => {
|
||||||
// If there is nothing being overwritten just move on
|
// If there is nothing being overwritten just move on
|
||||||
if (err && !err.NoSuchKey) {
|
if (err && !err.is.NoSuchKey) {
|
||||||
log.debug('error getting current part (if any)',
|
log.debug('error getting current part (if any)',
|
||||||
{ error: err });
|
{ error: err });
|
||||||
return next(err);
|
return next(err);
|
||||||
|
|
|
@ -69,7 +69,7 @@ function objectPutLegalHold(authInfo, request, log, callback) {
|
||||||
log.trace('object lock not enabled on bucket',
|
log.trace('object lock not enabled on bucket',
|
||||||
{ method: 'objectPutLegalHold' });
|
{ method: 'objectPutLegalHold' });
|
||||||
return next(errors.InvalidRequest.customizeDescription(
|
return next(errors.InvalidRequest.customizeDescription(
|
||||||
'Bucket is missing Object Lock Configuration'
|
'Bucket is missing Object Lock Configuration',
|
||||||
), bucket);
|
), bucket);
|
||||||
}
|
}
|
||||||
return next(null, bucket, objectMD);
|
return next(null, bucket, objectMD);
|
||||||
|
|
|
@ -94,7 +94,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
|
||||||
// Get the destination bucket.
|
// Get the destination bucket.
|
||||||
next => metadata.getBucket(bucketName, log,
|
next => metadata.getBucket(bucketName, log,
|
||||||
(err, destinationBucket) => {
|
(err, destinationBucket) => {
|
||||||
if (err && err.NoSuchBucket) {
|
if (err?.is.NoSuchBucket) {
|
||||||
return next(errors.NoSuchBucket, destinationBucket);
|
return next(errors.NoSuchBucket, destinationBucket);
|
||||||
}
|
}
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -142,7 +142,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
|
||||||
(destinationBucket, cipherBundle, next) =>
|
(destinationBucket, cipherBundle, next) =>
|
||||||
metadata.getBucket(mpuBucketName, log,
|
metadata.getBucket(mpuBucketName, log,
|
||||||
(err, mpuBucket) => {
|
(err, mpuBucket) => {
|
||||||
if (err && err.NoSuchBucket) {
|
if (err?.is.NoSuchBucket) {
|
||||||
return next(errors.NoSuchUpload, destinationBucket);
|
return next(errors.NoSuchUpload, destinationBucket);
|
||||||
}
|
}
|
||||||
if (err) {
|
if (err) {
|
||||||
|
@ -252,7 +252,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
|
||||||
return metadata.getObjectMD(mpuBucketName, partKey, {}, log,
|
return metadata.getObjectMD(mpuBucketName, partKey, {}, log,
|
||||||
(err, res) => {
|
(err, res) => {
|
||||||
// If there is no object with the same key, continue.
|
// If there is no object with the same key, continue.
|
||||||
if (err && !err.NoSuchKey) {
|
if (err && !err.is.NoSuchKey) {
|
||||||
log.error('error getting current part (if any)', {
|
log.error('error getting current part (if any)', {
|
||||||
error: err,
|
error: err,
|
||||||
method: 'objectPutPart::metadata.getObjectMD',
|
method: 'objectPutPart::metadata.getObjectMD',
|
||||||
|
|
|
@ -72,7 +72,7 @@ function objectPutRetention(authInfo, request, log, callback) {
|
||||||
log.trace('object lock not enabled on bucket',
|
log.trace('object lock not enabled on bucket',
|
||||||
{ method: 'objectPutRetention' });
|
{ method: 'objectPutRetention' });
|
||||||
return next(errors.InvalidRequest.customizeDescription(
|
return next(errors.InvalidRequest.customizeDescription(
|
||||||
'Bucket is missing Object Lock Configuration'
|
'Bucket is missing Object Lock Configuration',
|
||||||
), bucket);
|
), bucket);
|
||||||
}
|
}
|
||||||
return next(null, bucket, objectMD);
|
return next(null, bucket, objectMD);
|
||||||
|
|
|
@ -35,7 +35,7 @@ function generateXml(xml, owner, userBuckets, splitter) {
|
||||||
`<Name>${key}</Name>`,
|
`<Name>${key}</Name>`,
|
||||||
`<CreationDate>${bucket.value.creationDate}` +
|
`<CreationDate>${bucket.value.creationDate}` +
|
||||||
'</CreationDate>',
|
'</CreationDate>',
|
||||||
'</Bucket>'
|
'</Bucket>',
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
xml.push('</Buckets></ListAllMyBucketsResult>');
|
xml.push('</Buckets></ListAllMyBucketsResult>');
|
||||||
|
@ -68,7 +68,7 @@ function serviceGet(authInfo, request, log, callback) {
|
||||||
`<DisplayName>${authInfo.getAccountDisplayName()}` +
|
`<DisplayName>${authInfo.getAccountDisplayName()}` +
|
||||||
'</DisplayName>',
|
'</DisplayName>',
|
||||||
'</Owner>',
|
'</Owner>',
|
||||||
'<Buckets>'
|
'<Buckets>',
|
||||||
);
|
);
|
||||||
return services.getService(authInfo, request, log, constants.splitter,
|
return services.getService(authInfo, request, log, constants.splitter,
|
||||||
(err, userBuckets, splitter) => {
|
(err, userBuckets, splitter) => {
|
||||||
|
|
|
@ -147,7 +147,7 @@ function websiteGet(request, log, callback) {
|
||||||
'bucketGet', constants.publicId, null, log, request);
|
'bucketGet', constants.publicId, null, log, request);
|
||||||
// if index object does not exist and bucket is private AWS
|
// if index object does not exist and bucket is private AWS
|
||||||
// returns 403 - AccessDenied error.
|
// returns 403 - AccessDenied error.
|
||||||
if (err === errors.NoSuchKey && !bucketAuthorized) {
|
if (err.is.NoSuchKey && !bucketAuthorized) {
|
||||||
returnErr = errors.AccessDenied;
|
returnErr = errors.AccessDenied;
|
||||||
}
|
}
|
||||||
return _errorActions(returnErr,
|
return _errorActions(returnErr,
|
||||||
|
|
|
@ -107,7 +107,7 @@ function websiteHead(request, log, callback) {
|
||||||
'bucketGet', constants.publicId, null, log, request);
|
'bucketGet', constants.publicId, null, log, request);
|
||||||
// if index object does not exist and bucket is private AWS
|
// if index object does not exist and bucket is private AWS
|
||||||
// returns 403 - AccessDenied error.
|
// returns 403 - AccessDenied error.
|
||||||
if (err === errors.NoSuchKey && !bucketAuthorized) {
|
if (err.is.NoSuchKey && !bucketAuthorized) {
|
||||||
returnErr = errors.AccessDenied;
|
returnErr = errors.AccessDenied;
|
||||||
}
|
}
|
||||||
return _errorActions(returnErr, routingRules,
|
return _errorActions(returnErr, routingRules,
|
||||||
|
|
|
@ -283,7 +283,7 @@ class V4Transform extends Transform {
|
||||||
}
|
}
|
||||||
// get next chunk
|
// get next chunk
|
||||||
return callback();
|
return callback();
|
||||||
}
|
},
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -9,7 +9,7 @@ const { config } = require('../../Config');
|
||||||
|
|
||||||
const missingVerIdInternalError = errors.InternalError.customizeDescription(
|
const missingVerIdInternalError = errors.InternalError.customizeDescription(
|
||||||
'Invalid state. Please ensure versioning is enabled ' +
|
'Invalid state. Please ensure versioning is enabled ' +
|
||||||
'in AWS for the location constraint and try again.'
|
'in AWS for the location constraint and try again.',
|
||||||
);
|
);
|
||||||
|
|
||||||
class AwsClient {
|
class AwsClient {
|
||||||
|
@ -42,7 +42,7 @@ class AwsClient {
|
||||||
err, this._dataStoreName);
|
err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
if (!data.VersionId) {
|
if (!data.VersionId) {
|
||||||
|
@ -107,13 +107,13 @@ class AwsClient {
|
||||||
const error = errors.ServiceUnavailable
|
const error = errors.ServiceUnavailable
|
||||||
.customizeDescription(
|
.customizeDescription(
|
||||||
'Unexpected error from AWS: "NotFound". Data on AWS ' +
|
'Unexpected error from AWS: "NotFound". Data on AWS ' +
|
||||||
'may have been altered outside of CloudServer.'
|
'may have been altered outside of CloudServer.',
|
||||||
);
|
);
|
||||||
return callback(error);
|
return callback(error);
|
||||||
}
|
}
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
return callback();
|
return callback();
|
||||||
|
@ -160,7 +160,7 @@ class AwsClient {
|
||||||
}
|
}
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
return callback();
|
return callback();
|
||||||
|
@ -231,7 +231,7 @@ class AwsClient {
|
||||||
err, this._dataStoreName);
|
err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
return callback(null, mpuResObj);
|
return callback(null, mpuResObj);
|
||||||
|
@ -259,7 +259,7 @@ class AwsClient {
|
||||||
'on uploadPart', err, this._dataStoreName);
|
'on uploadPart', err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
// Because we manually add quotes to ETag later, remove quotes here
|
// Because we manually add quotes to ETag later, remove quotes here
|
||||||
|
@ -287,7 +287,7 @@ class AwsClient {
|
||||||
err, this._dataStoreName);
|
err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
// build storedParts object to mimic Scality S3 backend returns
|
// build storedParts object to mimic Scality S3 backend returns
|
||||||
|
@ -359,7 +359,7 @@ class AwsClient {
|
||||||
'completeMPU', err, this._dataStoreName);
|
'completeMPU', err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
if (!completeMpuRes.VersionId) {
|
if (!completeMpuRes.VersionId) {
|
||||||
|
@ -377,7 +377,7 @@ class AwsClient {
|
||||||
'headObject', err, this._dataStoreName);
|
'headObject', err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
// remove quotes from eTag because they're added later
|
// remove quotes from eTag because they're added later
|
||||||
|
@ -403,7 +403,7 @@ class AwsClient {
|
||||||
'using the same uploadId.', err, this._dataStoreName);
|
'using the same uploadId.', err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
return callback();
|
return callback();
|
||||||
|
@ -431,7 +431,7 @@ class AwsClient {
|
||||||
'putObjectTagging', err, this._dataStoreName);
|
'putObjectTagging', err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
return callback();
|
return callback();
|
||||||
|
@ -453,7 +453,7 @@ class AwsClient {
|
||||||
'deleteObjectTagging', err, this._dataStoreName);
|
'deleteObjectTagging', err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
return callback();
|
return callback();
|
||||||
|
@ -490,14 +490,14 @@ class AwsClient {
|
||||||
this._dataStoreName);
|
this._dataStoreName);
|
||||||
return callback(errors.AccessDenied
|
return callback(errors.AccessDenied
|
||||||
.customizeDescription('Error: Unable to access ' +
|
.customizeDescription('Error: Unable to access ' +
|
||||||
`${sourceAwsBucketName} AWS bucket`)
|
`${sourceAwsBucketName} AWS bucket`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
logHelper(log, 'error', 'error from data backend on ' +
|
logHelper(log, 'error', 'error from data backend on ' +
|
||||||
'copyObject', err, this._dataStoreName);
|
'copyObject', err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
if (!copyResult.VersionId) {
|
if (!copyResult.VersionId) {
|
||||||
|
@ -539,14 +539,14 @@ class AwsClient {
|
||||||
this._dataStoreName);
|
this._dataStoreName);
|
||||||
return callback(errors.AccessDenied
|
return callback(errors.AccessDenied
|
||||||
.customizeDescription('Error: Unable to access ' +
|
.customizeDescription('Error: Unable to access ' +
|
||||||
`${sourceAwsBucketName} AWS bucket`)
|
`${sourceAwsBucketName} AWS bucket`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
logHelper(log, 'error', 'error from data backend on ' +
|
logHelper(log, 'error', 'error from data backend on ' +
|
||||||
'uploadPartCopy', err, this._dataStoreName);
|
'uploadPartCopy', err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
const eTag = removeQuotes(res.CopyPartResult.ETag);
|
const eTag = removeQuotes(res.CopyPartResult.ETag);
|
||||||
|
|
|
@ -396,14 +396,14 @@ class AzureClient {
|
||||||
this._dataStoreName);
|
this._dataStoreName);
|
||||||
return callback(errors.AccessDenied
|
return callback(errors.AccessDenied
|
||||||
.customizeDescription('Error: Unable to access ' +
|
.customizeDescription('Error: Unable to access ' +
|
||||||
`${sourceContainerName} Azure Container`)
|
`${sourceContainerName} Azure Container`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
logHelper(log, 'error', 'error from data backend on ' +
|
logHelper(log, 'error', 'error from data backend on ' +
|
||||||
'copyObject', err, this._dataStoreName);
|
'copyObject', err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS: ${err.message}`)
|
`AWS: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
if (res.copy.status === 'pending') {
|
if (res.copy.status === 'pending') {
|
||||||
|
@ -417,12 +417,12 @@ class AzureClient {
|
||||||
'on abortCopyBlob', err, this._dataStoreName);
|
'on abortCopyBlob', err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS on abortCopyBlob: ${err.message}`)
|
`AWS on abortCopyBlob: ${err.message}`),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
return callback(errors.InvalidObjectState
|
return callback(errors.InvalidObjectState
|
||||||
.customizeDescription('Error: Azure copy status was ' +
|
.customizeDescription('Error: Azure copy status was ' +
|
||||||
'pending. It has been aborted successfully')
|
'pending. It has been aborted successfully'),
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,26 +1,18 @@
|
||||||
const async = require('async');
|
const { storage } = require('arsenal');
|
||||||
const { errors, s3middleware } = require('arsenal');
|
|
||||||
const PassThrough = require('stream').PassThrough;
|
|
||||||
|
|
||||||
const DataFileInterface = require('./file/backend');
|
|
||||||
const inMemory = require('./in_memory/backend').backend;
|
|
||||||
const locationConstraintCheck =
|
|
||||||
require('../api/apiUtils/object/locationConstraintCheck');
|
|
||||||
const multipleBackendGateway = require('./multipleBackendGateway');
|
|
||||||
const utils = require('./external/utils');
|
|
||||||
const { config } = require('../Config');
|
const { config } = require('../Config');
|
||||||
const MD5Sum = s3middleware.MD5Sum;
|
|
||||||
const NullStream = s3middleware.NullStream;
|
|
||||||
const assert = require('assert');
|
|
||||||
const kms = require('../kms/wrapper');
|
const kms = require('../kms/wrapper');
|
||||||
const externalBackends = require('../../constants').externalBackends;
|
const metadata = require('../metadata/wrapper');
|
||||||
const constants = require('../../constants');
|
const vault = require('../auth/vault');
|
||||||
const { BackendInfo } = require('../api/apiUtils/object/BackendInfo');
|
const locationStorageCheck =
|
||||||
const RelayMD5Sum = require('../utilities/RelayMD5Sum');
|
require('../api/apiUtils/object/locationStorageCheck');
|
||||||
const skipError = new Error('skip');
|
const { DataWrapper, MultipleBackendGateway, parseLC } = storage.data;
|
||||||
|
const { DataFileInterface } = storage.data.file;
|
||||||
|
const inMemory = storage.data.inMemory.datastore.backend;
|
||||||
|
|
||||||
let CdmiData;
|
let CdmiData;
|
||||||
try {
|
try {
|
||||||
|
// eslint-disable-next-line import/no-unresolved
|
||||||
CdmiData = require('cdmiclient').CdmiData;
|
CdmiData = require('cdmiclient').CdmiData;
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
CdmiData = null;
|
CdmiData = null;
|
||||||
|
@ -33,10 +25,12 @@ if (config.backends.data === 'mem') {
|
||||||
client = inMemory;
|
client = inMemory;
|
||||||
implName = 'mem';
|
implName = 'mem';
|
||||||
} else if (config.backends.data === 'file') {
|
} else if (config.backends.data === 'file') {
|
||||||
client = new DataFileInterface();
|
client = new DataFileInterface(config);
|
||||||
implName = 'file';
|
implName = 'file';
|
||||||
} else if (config.backends.data === 'multiple') {
|
} else if (config.backends.data === 'multiple') {
|
||||||
client = multipleBackendGateway;
|
const clients = parseLC(config, vault);
|
||||||
|
client = new MultipleBackendGateway(
|
||||||
|
clients, metadata, locationStorageCheck);
|
||||||
implName = 'multipleBackends';
|
implName = 'multipleBackends';
|
||||||
} else if (config.backends.data === 'cdmi') {
|
} else if (config.backends.data === 'cdmi') {
|
||||||
if (!CdmiData) {
|
if (!CdmiData) {
|
||||||
|
@ -52,780 +46,16 @@ if (config.backends.data === 'mem') {
|
||||||
implName = 'cdmi';
|
implName = 'cdmi';
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
const data = new DataWrapper(
|
||||||
* _retryDelete - Attempt to delete key again if it failed previously
|
client, implName, config, kms, metadata, locationStorageCheck, vault);
|
||||||
* @param { string | object } objectGetInfo - either string location of object
|
|
||||||
* to delete or object containing info of object to delete
|
|
||||||
* @param {object} log - Werelogs request logger
|
|
||||||
* @param {number} count - keeps count of number of times function has been run
|
|
||||||
* @param {function} cb - callback
|
|
||||||
* @returns undefined and calls callback
|
|
||||||
*/
|
|
||||||
const MAX_RETRY = 2;
|
|
||||||
|
|
||||||
// This check is done because on a put, complete mpu or copy request to
|
|
||||||
// Azure/AWS, if the object already exists on that backend, the existing object
|
|
||||||
// should not be deleted, which is the functionality for all other backends
|
|
||||||
function _shouldSkipDelete(locations, requestMethod, newObjDataStoreName) {
|
|
||||||
const skipMethods = { PUT: true, POST: true };
|
|
||||||
if (!Array.isArray(locations) || !locations[0] ||
|
|
||||||
!locations[0].dataStoreType) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
const isSkipBackend = externalBackends[locations[0].dataStoreType];
|
|
||||||
const isMatchingBackends =
|
|
||||||
locations[0].dataStoreName === newObjDataStoreName;
|
|
||||||
const isSkipMethod = skipMethods[requestMethod];
|
|
||||||
return (isSkipBackend && isMatchingBackends && isSkipMethod);
|
|
||||||
}
|
|
||||||
|
|
||||||
function _retryDelete(objectGetInfo, log, count, cb) {
|
|
||||||
if (count > MAX_RETRY) {
|
|
||||||
return cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
return client.delete(objectGetInfo, log.getSerializedUids(), err => {
|
|
||||||
if (err) {
|
|
||||||
if (err.ObjNotFound) {
|
|
||||||
log.info('no such key in datastore',
|
|
||||||
{ objectGetInfo, implName, moreRetries: 'no' });
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
log.error('delete error from datastore',
|
|
||||||
{ error: err, implName, moreRetries: 'yes' });
|
|
||||||
return _retryDelete(objectGetInfo, log, count + 1, cb);
|
|
||||||
}
|
|
||||||
return cb();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
function _put(cipherBundle, value, valueSize,
|
|
||||||
keyContext, backendInfo, log, cb) {
|
|
||||||
assert.strictEqual(typeof valueSize, 'number');
|
|
||||||
log.debug('sending put to datastore', { implName, keyContext,
|
|
||||||
method: 'put' });
|
|
||||||
let hashedStream = null;
|
|
||||||
if (value) {
|
|
||||||
hashedStream = new MD5Sum();
|
|
||||||
value.pipe(hashedStream);
|
|
||||||
value.once('clientError', () => {
|
|
||||||
log.trace('destroying hashed stream');
|
|
||||||
hashedStream.destroy();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
|
config.on('location-constraints-update', () => {
|
||||||
if (implName === 'multipleBackends') {
|
if (implName === 'multipleBackends') {
|
||||||
// Need to send backendInfo to client.put and
|
const clients = parseLC(config, vault);
|
||||||
// client.put will provide dataRetrievalInfo so no
|
client = new MultipleBackendGateway(
|
||||||
// need to construct here
|
clients, metadata, locationStorageCheck);
|
||||||
/* eslint-disable no-param-reassign */
|
data.switch(client);
|
||||||
keyContext.cipherBundle = cipherBundle;
|
|
||||||
return client.put(hashedStream,
|
|
||||||
valueSize, keyContext, backendInfo, log.getSerializedUids(),
|
|
||||||
(err, dataRetrievalInfo) => {
|
|
||||||
if (err) {
|
|
||||||
log.error('put error from datastore',
|
|
||||||
{ error: err, implName });
|
|
||||||
if (err.httpCode === 408) {
|
|
||||||
return cb(errors.IncompleteBody);
|
|
||||||
}
|
}
|
||||||
return cb(errors.ServiceUnavailable);
|
|
||||||
}
|
|
||||||
return cb(null, dataRetrievalInfo, hashedStream);
|
|
||||||
});
|
});
|
||||||
}
|
|
||||||
/* eslint-enable no-param-reassign */
|
|
||||||
|
|
||||||
let writeStream = hashedStream;
|
module.exports = { data, client, implName };
|
||||||
if (cipherBundle && cipherBundle.cipher) {
|
|
||||||
writeStream = cipherBundle.cipher;
|
|
||||||
hashedStream.pipe(writeStream);
|
|
||||||
}
|
|
||||||
|
|
||||||
return client.put(writeStream, valueSize, keyContext,
|
|
||||||
log.getSerializedUids(), (err, key) => {
|
|
||||||
if (err) {
|
|
||||||
log.error('put error from datastore',
|
|
||||||
{ error: err, implName });
|
|
||||||
if (err.httpCode === 408) {
|
|
||||||
return cb(errors.IncompleteBody);
|
|
||||||
}
|
|
||||||
return cb(errors.InternalError);
|
|
||||||
}
|
|
||||||
const dataRetrievalInfo = {
|
|
||||||
key,
|
|
||||||
dataStoreName: implName,
|
|
||||||
};
|
|
||||||
return cb(null, dataRetrievalInfo, hashedStream);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
const data = {
|
|
||||||
put: (cipherBundle, value, valueSize, keyContext, backendInfo, log, cb) => {
|
|
||||||
_put(cipherBundle, value, valueSize, keyContext, backendInfo, log,
|
|
||||||
(err, dataRetrievalInfo, hashedStream) => {
|
|
||||||
if (err) {
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
if (hashedStream) {
|
|
||||||
if (hashedStream.completedHash) {
|
|
||||||
return cb(null, dataRetrievalInfo, hashedStream);
|
|
||||||
}
|
|
||||||
hashedStream.on('hashed', () => {
|
|
||||||
hashedStream.removeAllListeners('hashed');
|
|
||||||
return cb(null, dataRetrievalInfo, hashedStream);
|
|
||||||
});
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
return cb(null, dataRetrievalInfo);
|
|
||||||
});
|
|
||||||
},
|
|
||||||
|
|
||||||
head: (objectGetInfo, log, cb) => {
|
|
||||||
if (implName !== 'multipleBackends') {
|
|
||||||
// no-op if not multipleBackend implementation;
|
|
||||||
// head is used during get just to check external backend data state
|
|
||||||
return process.nextTick(cb);
|
|
||||||
}
|
|
||||||
return client.head(objectGetInfo, log.getSerializedUids(), cb);
|
|
||||||
},
|
|
||||||
|
|
||||||
get: (objectGetInfo, response, log, cb) => {
|
|
||||||
const isMdModelVersion2 = typeof(objectGetInfo) === 'string';
|
|
||||||
const isRequiredStringKey = constants.clientsRequireStringKey[implName];
|
|
||||||
const key = isMdModelVersion2 ? objectGetInfo : objectGetInfo.key;
|
|
||||||
const clientGetInfo = isRequiredStringKey ? key : objectGetInfo;
|
|
||||||
const range = objectGetInfo.range;
|
|
||||||
|
|
||||||
// If the key is explicitly set to null, the part to
|
|
||||||
// be read doesn't really exist and is only made of zeroes.
|
|
||||||
// This functionality is used by Scality-NFSD.
|
|
||||||
// Otherwise, the key is always defined
|
|
||||||
assert(key === null || key !== undefined);
|
|
||||||
if (key === null) {
|
|
||||||
cb(null, new NullStream(objectGetInfo.size, range));
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
log.debug('sending get to datastore', { implName,
|
|
||||||
key, range, method: 'get' });
|
|
||||||
// We need to use response as a writable stream for AZURE GET
|
|
||||||
if (!isMdModelVersion2 && !isRequiredStringKey && response) {
|
|
||||||
clientGetInfo.response = response;
|
|
||||||
}
|
|
||||||
client.get(clientGetInfo, range, log.getSerializedUids(),
|
|
||||||
(err, stream) => {
|
|
||||||
if (err) {
|
|
||||||
log.error('get error from datastore',
|
|
||||||
{ error: err, implName });
|
|
||||||
return cb(errors.ServiceUnavailable);
|
|
||||||
}
|
|
||||||
if (objectGetInfo.cipheredDataKey) {
|
|
||||||
const serverSideEncryption = {
|
|
||||||
cryptoScheme: objectGetInfo.cryptoScheme,
|
|
||||||
masterKeyId: objectGetInfo.masterKeyId,
|
|
||||||
cipheredDataKey: Buffer.from(
|
|
||||||
objectGetInfo.cipheredDataKey, 'base64'),
|
|
||||||
};
|
|
||||||
const offset = objectGetInfo.range ?
|
|
||||||
objectGetInfo.range[0] : 0;
|
|
||||||
return kms.createDecipherBundle(
|
|
||||||
serverSideEncryption, offset, log,
|
|
||||||
(err, decipherBundle) => {
|
|
||||||
if (err) {
|
|
||||||
log.error('cannot get decipher bundle ' +
|
|
||||||
'from kms', {
|
|
||||||
method: 'data.wrapper.data.get',
|
|
||||||
});
|
|
||||||
return cb(err);
|
|
||||||
}
|
|
||||||
stream.pipe(decipherBundle.decipher);
|
|
||||||
return cb(null, decipherBundle.decipher);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
return cb(null, stream);
|
|
||||||
});
|
|
||||||
},
|
|
||||||
|
|
||||||
delete: (objectGetInfo, log, cb) => {
const callback = cb || log.end;
const isMdModelVersion2 = typeof(objectGetInfo) === 'string';
const isRequiredStringKey = constants.clientsRequireStringKey[implName];
const key = isMdModelVersion2 ? objectGetInfo : objectGetInfo.key;
const clientGetInfo = isRequiredStringKey ? key : objectGetInfo;

log.trace('sending delete to datastore', {
implName, key, method: 'delete' });
// If the key is explicitly set to null, the part to
// be deleted doesn't really exist.
// This functionality is used by Scality-NFSD.
// Otherwise, the key is always defined
assert(key === null || key !== undefined);
if (key === null) {
callback(null);
return;
}
_retryDelete(clientGetInfo, log, 0, err => {
if (err && !err.ObjNotFound) {
log.error('delete error from datastore',
{ error: err, key: objectGetInfo.key, moreRetries: 'no' });
}
return callback(err);
});
},
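`_retryDelete` is defined earlier in wrapper.js and is not shown in this hunk. Judging from the call above (start at attempt 0, and stop retrying once the backend reports ObjNotFound), a helper of that shape could be sketched roughly as follows; the retry limit, the delay and the exact `client.delete` signature are assumptions, not the repository's actual values:

    // Rough sketch only; the real _retryDelete lives elsewhere in wrapper.js.
    const MAX_RETRIES = 5;       // assumed
    const RETRY_DELAY_MS = 50;   // assumed

    function retryDelete(objectGetInfo, log, count, cb) {
        return client.delete(objectGetInfo, log.getSerializedUids(), err => {
            if (err) {
                if (err.ObjNotFound || count >= MAX_RETRIES) {
                    return cb(err);
                }
                // back off a little before the next attempt
                return setTimeout(() => retryDelete(objectGetInfo, log, count + 1, cb),
                    RETRY_DELAY_MS * (count + 1));
            }
            return cb();
        });
    }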
batchDelete: (locations, requestMethod, newObjDataStoreName, log, cb) => {
// TODO: The method of persistence of sproxy delete key will
// be finalized; refer Issue #312 for the discussion. In the
// meantime, we at least log the location of the data we are
// about to delete before attempting its deletion.
if (_shouldSkipDelete(locations, requestMethod, newObjDataStoreName)) {
return process.nextTick(cb);
}
log.trace('initiating batch delete', {
keys: locations,
implName,
method: 'batchDelete',
});
const keys = [];
let backendName = '';
const shouldBatchDelete = locations.every(l => {
// legacy sproxyd location, should fallback to using regular delete
if (typeof l === 'string') {
return false;
}
const { dataStoreName, key } = l;
backendName = dataStoreName;
const type = config.getLocationConstraintType(dataStoreName);
// filter out possible `null` created by NFS
if (key && type === 'scality') {
keys.push(key);
return true;
}
return false;
});
if (shouldBatchDelete) {
return client.batchDelete(backendName, { keys }, log, cb);
}
return async.eachLimit(locations, 5, (loc, next) => {
process.nextTick(() => data.delete(loc, log, next));
},
err => {
if (err) {
log.end().error('batch delete failed', { error: err });
// deletion of non-existing objects result in 204
if (err.code === 404) {
return cb();
}
return cb(err);
}
log.end().trace('batch delete successfully completed');
return cb();
});
},

switch: newClient => {
client = newClient;
return client;
},

checkHealth: (log, cb, flightCheckOnStartUp) => {
if (!client.healthcheck) {
const defResp = {};
defResp[implName] = { code: 200, message: 'OK' };
return cb(null, defResp);
}
return client.healthcheck(flightCheckOnStartUp, log, (err, result) => {
let respBody = {};
if (err) {
log.error(`error from ${implName}`, { error: err });
respBody[implName] = {
error: err,
};
// error returned as null so async parallel doesn't return
// before all backends are checked
return cb(null, respBody);
}
if (implName === 'multipleBackends') {
respBody = result;
return cb(null, respBody);
}
respBody[implName] = {
code: result.statusCode,
message: result.statusMessage,
};
return cb(null, respBody);
});
},

getDiskUsage: (log, cb) => {
if (!client.getDiskUsage) {
log.debug('returning empty disk usage as fallback', { implName });
return cb(null, {});
}
return client.getDiskUsage(log.getSerializedUids(), cb);
},
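The comment inside checkHealth explains why a backend failure is reported through the result object rather than through the callback's error slot: an async.parallel aggregation has to visit every backend before answering. A hedged sketch of such an aggregation (the handler name and the failure test are illustrative only):

    const async = require('async');

    // Sketch: probe each subsystem; no checker passes a real error upward, so
    // the parallel run always completes and failures are read from the results.
    function aggregateHealth(log, done) {
        async.parallel({
            data: cb => data.checkHealth(log, cb),
        }, (err, results) => {
            const failed = Object.values(results)
                .some(resp => Object.values(resp).some(entry => entry.error));
            return done(failed ? new Error('unhealthy data backend') : null, results);
        });
    }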
/**
* _putForCopy - put used for copying object
* @param {object} cipherBundle - cipher bundle that encrypt the data
* @param {object} stream - stream containing the data
* @param {object} part - element of dataLocator array
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @param {object} log - Werelogs request logger
* @param {function} cb - callback
* @returns {function} cb - callback
*/
_putForCopy: (cipherBundle, stream, part, dataStoreContext,
destBackendInfo, log, cb) => data.put(cipherBundle, stream,
part.size, dataStoreContext,
destBackendInfo, log,
(error, partRetrievalInfo) => {
if (error) {
return cb(error);
}
const partResult = {
key: partRetrievalInfo.key,
dataStoreName: partRetrievalInfo
.dataStoreName,
dataStoreType: partRetrievalInfo
.dataStoreType,
start: part.start,
size: part.size,
};
if (cipherBundle) {
partResult.cryptoScheme = cipherBundle.cryptoScheme;
partResult.cipheredDataKey = cipherBundle.cipheredDataKey;
}
if (part.dataStoreETag) {
partResult.dataStoreETag = part.dataStoreETag;
}
if (partRetrievalInfo.dataStoreVersionId) {
partResult.dataStoreVersionId =
partRetrievalInfo.dataStoreVersionId;
}
return cb(null, partResult);
}),

/**
* _dataCopyPut - put used for copying object with and without
* encryption
* @param {string} serverSideEncryption - Server side encryption
* @param {object} stream - stream containing the data
* @param {object} part - element of dataLocator array
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @param {object} log - Werelogs request logger
* @param {function} cb - callback
* @returns {function} cb - callback
*/
_dataCopyPut: (serverSideEncryption, stream, part, dataStoreContext,
destBackendInfo, log, cb) => {
if (serverSideEncryption) {
return kms.createCipherBundle(
serverSideEncryption,
log, (err, cipherBundle) => {
if (err) {
log.debug('error getting cipherBundle');
return cb(errors.InternalError);
}
return data._putForCopy(cipherBundle, stream, part,
dataStoreContext, destBackendInfo, log, cb);
});
}
// Copied object is not encrypted so just put it
// without a cipherBundle
return data._putForCopy(null, stream, part, dataStoreContext,
destBackendInfo, log, cb);
},

/**
* copyObject - copy object
* @param {object} request - request object
* @param {string} sourceLocationConstraintName -
* source locationContraint name (awsbackend, azurebackend, ...)
* @param {object} storeMetadataParams - metadata information of the
* source object
* @param {array} dataLocator - source object metadata location(s)
* NOTE: for Azure and AWS data backend this array only has one item
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @param {object} sourceBucketMD - metadata of the source bucket
* @param {object} destBucketMD - metadata of the destination bucket
* @param {object} serverSideEncryption - server side encryption configuration
* @param {object} log - Werelogs request logger
* @param {function} cb - callback
* @returns {function} cb - callback
*/
copyObject: (request,
sourceLocationConstraintName, storeMetadataParams, dataLocator,
dataStoreContext, destBackendInfo, sourceBucketMD, destBucketMD,
serverSideEncryption, log, cb) => {
if (config.backends.data === 'multiple' &&
utils.externalBackendCopy(sourceLocationConstraintName,
storeMetadataParams.dataStoreName, sourceBucketMD, destBucketMD)
&& serverSideEncryption === null) {
const destLocationConstraintName =
storeMetadataParams.dataStoreName;
const objectGetInfo = dataLocator[0];
const externalSourceKey = objectGetInfo.key;
return client.copyObject(request, destLocationConstraintName,
externalSourceKey, sourceLocationConstraintName,
storeMetadataParams, log, (error, objectRetrievalInfo) => {
if (error) {
return cb(error);
}
const putResult = {
key: objectRetrievalInfo.key,
dataStoreName: objectRetrievalInfo.
dataStoreName,
dataStoreType: objectRetrievalInfo.
dataStoreType,
dataStoreVersionId:
objectRetrievalInfo.dataStoreVersionId,
size: storeMetadataParams.size,
dataStoreETag: objectGetInfo.dataStoreETag,
start: objectGetInfo.start,
};
const putResultArr = [putResult];
return cb(null, putResultArr);
});
}

// dataLocator is an array. need to get and put all parts
// For now, copy 1 part at a time. Could increase the second
// argument here to increase the number of parts
// copied at once.
return async.mapLimit(dataLocator, 1,
// eslint-disable-next-line prefer-arrow-callback
function copyPart(part, copyCb) {
if (part.dataStoreType === 'azure') {
const passThrough = new PassThrough();
return async.parallel([
parallelCb => data.get(part, passThrough, log, err =>
parallelCb(err)),
parallelCb => data._dataCopyPut(serverSideEncryption,
passThrough,
part, dataStoreContext, destBackendInfo, log,
parallelCb),
], (err, res) => {
if (err) {
return copyCb(err);
}
return copyCb(null, res[1]);
});
}
return data.get(part, null, log, (err, stream) => {
if (err) {
return copyCb(err);
}
return data._dataCopyPut(serverSideEncryption, stream,
part, dataStoreContext, destBackendInfo, log, copyCb);
});
}, (err, results) => {
if (err) {
log.debug('error transferring data from source',
{ error: err });
return cb(err);
}
return cb(null, results);
});
},
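The async.mapLimit(dataLocator, 1, ...) call above is the knob the comment refers to: raising the second argument copies more parts concurrently while the results array keeps the input order. A self-contained toy illustration of that behaviour (the data and the limit of 4 are examples, not values CloudServer uses):

    const async = require('async');

    const parts = [{ start: 0, size: 5 }, { start: 5, size: 5 }, { start: 10, size: 2 }];
    async.mapLimit(parts, 4, (part, cb) => {
        // stand-in for the get-then-put pipeline shown above
        setImmediate(() => cb(null, { copied: true, start: part.start, size: part.size }));
    }, (err, results) => {
        if (err) {
            throw err;
        }
        console.log(results); // same order as `parts`, whatever the concurrency
    });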
_dataCopyPutPart: (request,
serverSideEncryption, stream, part,
dataStoreContext, destBackendInfo, locations, log, cb) => {
const numberPartSize =
Number.parseInt(part.size, 10);
const partNumber = Number.parseInt(request.query.partNumber, 10);
const uploadId = request.query.uploadId;
const destObjectKey = request.objectKey;
const destBucketName = request.bucketName;
const destLocationConstraintName = destBackendInfo
.getControllingLocationConstraint();
if (externalBackends[config
.locationConstraints[destLocationConstraintName]
.type]) {
return multipleBackendGateway.uploadPart(null, null,
stream, numberPartSize,
destLocationConstraintName, destObjectKey, uploadId,
partNumber, destBucketName, log,
(err, partInfo) => {
if (err) {
log.error('error putting ' +
'part to AWS', {
error: err,
method:
'objectPutCopyPart::' +
'multipleBackendGateway.' +
'uploadPart',
});
return cb(errors.ServiceUnavailable);
}
// skip to end of waterfall
// because don't need to store
// part metadata
if (partInfo &&
partInfo.dataStoreType === 'aws_s3') {
// if data backend handles MPU, skip to end
// of waterfall
const partResult = {
dataStoreETag: partInfo.dataStoreETag,
};
locations.push(partResult);
return cb(skipError, partInfo.dataStoreETag);
} else if (
partInfo &&
partInfo.dataStoreType === 'azure') {
const partResult = {
key: partInfo.key,
dataStoreName: partInfo.dataStoreName,
dataStoreETag: partInfo.dataStoreETag,
size: numberPartSize,
numberSubParts:
partInfo.numberSubParts,
partNumber: partInfo.partNumber,
};
locations.push(partResult);
return cb();
}
return cb(skipError);
});
}
if (serverSideEncryption) {
return kms.createCipherBundle(
serverSideEncryption,
log, (err, cipherBundle) => {
if (err) {
log.debug('error getting cipherBundle',
{ error: err });
return cb(errors.InternalError);
}
return data.put(cipherBundle, stream,
numberPartSize, dataStoreContext,
destBackendInfo, log,
(error, partRetrievalInfo,
hashedStream) => {
if (error) {
log.debug('error putting ' +
'encrypted part', { error });
return cb(error);
}
const partResult = {
key: partRetrievalInfo.key,
dataStoreName: partRetrievalInfo
.dataStoreName,
dataStoreETag: hashedStream
.completedHash,
// Do not include part start
// here since will change in
// final MPU object
size: numberPartSize,
sseCryptoScheme: cipherBundle
.cryptoScheme,
sseCipheredDataKey: cipherBundle
.cipheredDataKey,
sseAlgorithm: cipherBundle
.algorithm,
sseMasterKeyId: cipherBundle
.masterKeyId,
};
locations.push(partResult);
return cb();
});
});
}
// Copied object is not encrypted so just put it
// without a cipherBundle
return data.put(null, stream, numberPartSize,
dataStoreContext, destBackendInfo,
log, (error, partRetrievalInfo, hashedStream) => {
if (error) {
log.debug('error putting object part',
{ error });
return cb(error);
}
const partResult = {
key: partRetrievalInfo.key,
dataStoreName: partRetrievalInfo.dataStoreName,
dataStoreETag: hashedStream.completedHash,
size: numberPartSize,
};
locations.push(partResult);
return cb();
});
},
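skipError, defined earlier in this file, is not a real failure: returning it from a step lets the waterfall driving the MPU copy API jump past its remaining metadata steps once an external backend has already finished the part. A minimal illustration of that sentinel-error pattern (step names and values are illustrative):

    const async = require('async');

    const skipError = new Error('skip');
    let result = null;

    async.waterfall([
        next => next(null, 'part-etag'),
        (eTag, next) => {
            // the backend already did the work: remember it and skip the rest
            result = eTag;
            return next(skipError);
        },
        next => next(null, 'never reached'),
    ], err => {
        if (err && err !== skipError) {
            return console.error('real failure', err);
        }
        return console.log('completed with', result);
    });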
/**
* uploadPartCopy - put copy part
* @param {object} request - request object
* @param {object} log - Werelogs request logger
* @param {object} destBucketMD - destination bucket metadata
* @param {string} sourceLocationConstraintName -
* source locationContraint name (awsbackend, azurebackend, ...)
* @param {string} destLocationConstraintName -
* location of the destination MPU object (awsbackend, azurebackend, ...)
* @param {array} dataLocator - source object metadata location(s)
* NOTE: for Azure and AWS data backend this array
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* dataStoreContext.uploadId: uploadId
* dataStoreContext.partNumber: request.query.partNumber
* @param {function} callback - callback
* @returns {function} cb - callback
*/
uploadPartCopy: (request, log, destBucketMD, sourceLocationConstraintName,
destLocationConstraintName, dataLocator, dataStoreContext,
callback) => {
const serverSideEncryption = destBucketMD.getServerSideEncryption();
const lastModified = new Date().toJSON();

// skip if 0 byte object
if (dataLocator.length === 0) {
return process.nextTick(() => {
callback(null, constants.emptyFileMd5,
lastModified, serverSideEncryption, []);
});
}

// if destination mpu was initiated in legacy version
if (destLocationConstraintName === undefined) {
const backendInfoObj = locationConstraintCheck(request,
null, destBucketMD, log);
if (backendInfoObj.err) {
return process.nextTick(() => {
callback(backendInfoObj.err);
});
}
// eslint-disable-next-line no-param-reassign
destLocationConstraintName = backendInfoObj.controllingLC;
}

const locationTypeMatchAWS =
config.backends.data === 'multiple' &&
config.getLocationConstraintType(sourceLocationConstraintName) ===
config.getLocationConstraintType(destLocationConstraintName) &&
config.getLocationConstraintType(sourceLocationConstraintName) ===
'aws_s3';

// NOTE: using multipleBackendGateway.uploadPartCopy only if copying
// from AWS to AWS

if (locationTypeMatchAWS && dataLocator.length === 1) {
const awsSourceKey = dataLocator[0].key;
return multipleBackendGateway.uploadPartCopy(request,
destLocationConstraintName, awsSourceKey,
sourceLocationConstraintName, log, (error, eTag) => {
if (error) {
return callback(error);
}
return callback(skipError, eTag,
lastModified, serverSideEncryption);
});
}

const backendInfo = new BackendInfo(destLocationConstraintName);

// totalHash will be sent through the RelayMD5Sum transform streams
// to collect the md5 from multiple streams
let totalHash;
const locations = [];
// dataLocator is an array. need to get and put all parts
// in order so can get the ETag of full object
return async.forEachOfSeries(dataLocator,
// eslint-disable-next-line prefer-arrow-callback
function copyPart(part, index, cb) {
if (part.dataStoreType === 'azure') {
const passThrough = new PassThrough();
return async.parallel([
next => data.get(part, passThrough, log, err => {
if (err) {
log.error('error getting data part ' +
'from Azure', {
error: err,
method:
'objectPutCopyPart::' +
'multipleBackendGateway.' +
'copyPart',
});
return next(err);
}
return next();
}),
next => data._dataCopyPutPart(request,
serverSideEncryption, passThrough, part,
dataStoreContext, backendInfo, locations, log, next),
], err => {
if (err) {
return cb(err);
}
return cb();
});
}
return data.get(part, null, log, (err, stream) => {
if (err) {
log.debug('error getting object part',
{ error: err });
return cb(err);
}
const hashedStream =
new RelayMD5Sum(totalHash, updatedHash => {
totalHash = updatedHash;
});
stream.pipe(hashedStream);

// destLocationConstraintName is location of the
// destination MPU object
return data._dataCopyPutPart(request,
serverSideEncryption, hashedStream, part,
dataStoreContext, backendInfo, locations, log, cb);
});
}, err => {
// Digest the final combination of all of the part streams
if (err && err !== skipError) {
log.debug('error transferring data from source',
{ error: err, method: 'goGetData' });
return callback(err);
}
if (totalHash) {
totalHash = totalHash.digest('hex');
} else {
totalHash = locations[0].dataStoreETag;
}
if (err && err === skipError) {
return callback(skipError, totalHash,
lastModified, serverSideEncryption);
}
return callback(null, totalHash,
lastModified, serverSideEncryption, locations);
});
},
};

module.exports = data;
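uploadPartCopy threads a single hash object through arsenal's RelayMD5Sum so the MD5 of the copied data is accumulated across every source stream it reads, and totalHash is digested only once at the end. The essence of that accumulation, sketched with Node's crypto module rather than the arsenal stream:

    const crypto = require('crypto');
    const { PassThrough, Readable } = require('stream');

    // Sketch of the totalHash idea: one MD5 hash object is updated by every
    // part stream in sequence, then digested once after the last part.
    function relayMd5(source, hash) {
        const relay = new PassThrough();
        relay.on('data', chunk => hash.update(chunk));
        return source.pipe(relay);
    }

    const totalHash = crypto.createHash('md5');
    const partA = Readable.from([Buffer.from('part one ')]);
    const partB = Readable.from([Buffer.from('part two')]);

    relayMd5(partA, totalHash).on('end', () => {
        relayMd5(partB, totalHash).on('end', () => {
            console.log(totalHash.digest('hex')); // md5 of 'part one part two'
        });
    });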
@ -11,7 +11,6 @@ const Common = require('./common');
let scalityKMS;
let scalityKMSImpl;
try {
// eslint-disable-next-line import/no-unresolved
const ScalityKMS = require('scality-kms');
scalityKMS = new ScalityKMS(config.kms);
scalityKMSImpl = 'scalityKms';

@ -13,7 +13,6 @@ const versionSep = arsenal.versioning.VersioningConstants.VersionId.Separator;
const METASTORE = '__metastore';

class BucketFileInterface {

/**
* @constructor
* @param {object} [params] - constructor params

@ -84,7 +83,7 @@ class BucketFileInterface {

createBucket(bucketName, bucketMD, log, cb) {
this.getBucketAttributes(bucketName, log, err => {
if (err && err !== errors.NoSuchBucket) {
if (err && !err.is.NoSuchBucket) {
return cb(err);
}
if (err === undefined) {
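The err.is.NoSuchBucket form above is the calling-convention change that the arsenal bump further down in package.json (7.10.15 to 7.10.23) brings into this codebase: errors expose an `is` lookup keyed by error type, so callers stop comparing against exported singletons or reading type-named properties, and `err?.is.X` covers the case where err may be undefined. A small self-contained illustration of the before and after checks (the error object below is a hand-built stand-in, not a real ArsenalError):

    // Stand-in shaped like the errors used in this diff; illustration only.
    const err = { code: 404, is: { NoSuchBucket: true } };

    // Old style seen on the left-hand side of these hunks:
    //   if (err && err !== errors.NoSuchBucket) { ... }
    //   if (err && err.NoSuchBucket) { ... }

    // New style on the right-hand side:
    if (err?.is.NoSuchBucket) {
        console.log('bucket is missing, treat it as a fresh create');
    }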
@ -146,7 +146,7 @@ const metastore = {
return cb(null, {
bucket: bucket.serialize(),
obj: JSON.stringify(
metadata.keyMaps.get(bucketName).get(objName)
metadata.keyMaps.get(bucketName).get(objName),
),
});
});

@ -719,7 +719,7 @@ function batchDelete(request, response, log, callback) {
log.trace('batch delete locations', { locations });
return async.eachLimit(locations, 5, (loc, next) => {
data.delete(loc, log, err => {
if (err && err.ObjNotFound) {
if (err?.is.ObjNotFound) {
log.info('batch delete: data location do not exist', {
method: 'batchDelete',
location: loc,

@ -16,7 +16,7 @@ function getMetricToPush(prevObjectMD, newObjectMD) {
assert.deepStrictEqual(prevObjectMD.getAcl(), newObjectMD.getAcl());
assert.deepStrictEqual(
prevObjectMD.getTags(),
newObjectMD.getTags()
newObjectMD.getTags(),
);
} catch (e) {
return 'replicateTags';

@ -10,10 +10,18 @@ const { clientCheck } = require('./utilities/healthcheckHandler');
const _config = require('./Config').config;
const { blacklistedPrefixes } = require('../constants');
const api = require('./api/api');
const data = require('./data/wrapper');
const dataWrapper = require('./data/wrapper');
const kms = require('./kms/wrapper');
const locationStorageCheck =
require('./api/apiUtils/object/locationStorageCheck');
const vault = require('./auth/vault');
const metadata = require('./metadata/wrapper');

const routes = arsenal.s3routes.routes;
const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
const websiteEndpoints = _config.websiteEndpoints;
let client = dataWrapper.client;
const implName = dataWrapper.implName;

let allEndpoints;
function updateAllEndpoints() {

@ -21,6 +29,13 @@ function updateAllEndpoints() {
}
_config.on('rest-endpoints-update', updateAllEndpoints);
updateAllEndpoints();
_config.on('location-constraints-update', () => {
if (implName === 'multipleBackends') {
const clients = parseLC(_config, vault);
client = new MultipleBackendGateway(
clients, metadata, locationStorageCheck);
}
});

// redis client
let localCacheClient;

@ -78,7 +93,15 @@ class S3Server {
allEndpoints,
websiteEndpoints,
blacklistedPrefixes,
dataRetrievalFn: data.get,
dataRetrievalParams: {
client,
implName,
config: _config,
kms,
metadata,
locStorageCheckFn: locationStorageCheck,
vault,
},
};
routes(req, res, params, logger, _config);
}

@ -144,7 +167,7 @@ class S3Server {
cleanUp() {
logger.info('server shutting down');
Promise.all(this.servers.map(server =>
new Promise(resolve => server.close(resolve))
new Promise(resolve => server.close(resolve)),
)).then(() => process.exit(0));
}
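Two wiring changes land in this server.js hunk: the route layer now receives a dataRetrievalParams bag (client, implName, config, kms, metadata, locStorageCheckFn, vault) instead of a bare dataRetrievalFn, and the server swaps its data client whenever the config emits location-constraints-update. The reload itself is a plain EventEmitter subscribe-and-swap; a schematic sketch in which only the event name is taken from the diff:

    const EventEmitter = require('events');

    // Schematic stand-ins; the real objects come from ./Config and arsenal.
    const config = new EventEmitter();
    let client = { name: 'initial gateway' };

    config.on('location-constraints-update', () => {
        // rebuild the backend gateway from the refreshed location constraints
        client = { name: 'rebuilt gateway', builtAt: Date.now() };
        console.log('data client swapped:', client.name);
    });

    // elsewhere, once new location constraints have been loaded:
    config.emit('location-constraints-update');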
@ -42,7 +42,7 @@ const services = {
// buckets to list. By returning an empty array, the
// getService API will just respond with the user info
// without listing any buckets.
if (err && err.NoSuchBucket) {
if (err?.is.NoSuchBucket) {
log.trace('no buckets found');
// If we checked the old user bucket, that means we
// already checked the new user bucket. If neither the

@ -555,7 +555,7 @@ const services = {
// If the MPU was initiated, the mpu bucket should exist.
const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
metadata.getBucket(mpuBucketName, log, (err, mpuBucket) => {
if (err && err.NoSuchBucket) {
if (err?.is.NoSuchBucket) {
log.debug('bucket not found in metadata', { error: err,
method: 'services.metadataValidateMultipart' });
return cb(errors.NoSuchUpload);

@ -577,7 +577,7 @@ const services = {
metadata.getObjectMD(mpuBucket.getName(), mpuOverviewKey,
{}, log, (err, storedMetadata) => {
if (err) {
if (err.NoSuchKey) {
if (err.is.NoSuchKey) {
return cb(errors.NoSuchUpload);
}
log.error('error from metadata', { error: err });

@ -753,7 +753,7 @@ const services = {
assert.strictEqual(typeof bucketName, 'string');
const MPUBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
metadata.getBucket(MPUBucketName, log, (err, bucket) => {
if (err && err.NoSuchBucket) {
if (err?.is.NoSuchBucket) {
log.trace('no buckets found');
const creationDate = new Date().toJSON();
const mpuBucket = new BucketInfo(MPUBucketName,

@ -241,7 +241,7 @@ aclUtils.convertToXml = grantInfo => {
`<DisplayName>${escapeForXml(ownerInfo.displayName)}` +
'</DisplayName>',
'</Owner>',
'<AccessControlList>'
'<AccessControlList>',
);

grants.forEach(grant => {

@ -252,29 +252,29 @@ aclUtils.convertToXml = grantInfo => {
if (grant.ID) {
xml.push('<Grantee xmlns:xsi="http://www.w3.org/2001/' +
'XMLSchema-instance" xsi:type="CanonicalUser">',
`<ID>${grant.ID}</ID>`
`<ID>${grant.ID}</ID>`,
);
} else if (grant.URI) {
xml.push('<Grantee xmlns:xsi="http://www.w3.org/2001/' +
'XMLSchema-instance" xsi:type="Group">',
`<URI>${escapeForXml(grant.URI)}</URI>`
`<URI>${escapeForXml(grant.URI)}</URI>`,
);
}

if (grant.displayName) {
xml.push(`<DisplayName>${escapeForXml(grant.displayName)}` +
'</DisplayName>'
'</DisplayName>',
);
}

xml.push('</Grantee>',
`<Permission>${grant.permission}</Permission>`,
'</Grant>'
'</Grant>',
);
});

xml.push('</AccessControlList>',
'</AccessControlPolicy>'
'</AccessControlPolicy>',
);

return xml.join('');

@ -351,7 +351,7 @@ aclUtils.getCanonicalIDs = function getCanonicalIDs(acl) {
acl.WRITE,
acl.WRITE_ACP,
acl.READ,
acl.READ_ACP
acl.READ_ACP,
);
const uniqueGrantees = Array.from(new Set(aclGrantees));
// grantees can be a mix of canonicalIDs and predefined groups in the form

@ -70,7 +70,7 @@ function clientCheck(flightCheckOnStartUp, log, cb) {
// if there is an error from an external backend,
// only return a 500 if it is on startup
// (flightCheckOnStartUp set to true)
obj[k].error && (flightCheckOnStartUp || !obj[k].external)
obj[k].error && (flightCheckOnStartUp || !obj[k].external),
);
if (fail) {
return cb(errors.InternalError, obj);

@ -20,7 +20,7 @@
"homepage": "https://github.com/scality/S3#readme",
"dependencies": {
"@hapi/joi": "^17.1.0",
"arsenal": "git+https://github.com/scality/arsenal#7.10.15",
"arsenal": "git+https://github.com/scality/arsenal#7.10.23",
"async": "~2.5.0",
"aws-sdk": "2.905.0",
"azure-storage": "^2.1.0",

@ -44,7 +44,7 @@
},
"devDependencies": {
"bluebird": "^3.3.1",
"eslint": "^2.4.0",
"eslint": "^8.14.0",
"eslint-config-airbnb": "^6.0.0",
"eslint-config-scality": "scality/Guidelines#7.10.2",
"ioredis": "4.9.5",
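The eslint bump from ^2.4.0 to ^8.14.0 is what drives the long run of mechanical edits in the test files below: the newer setup appears to require trailing commas on multiline calls and literals, and syntax such as ?. needs a more recent parser. A two-line illustration of the comma style being applied (assumed rule, not quoted from the lint config):

    const assert = require('assert');

    // Multiline calls now end their last argument with a trailing comma,
    // which accounts for most of the churn in the hunks that follow.
    assert.deepStrictEqual(
        { a: 1 },
        { a: 1 },   // trailing comma after the last multiline argument
    );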
@ -1,2 +1,2 @@
'use strict'; // eslint-disable-line strict
require('./test.js'); // eslint-disable-line import/no-unresolved
require('./test.js');

@ -30,7 +30,7 @@ class BucketUtility {

createMany(bucketNames) {
const promises = bucketNames.map(
bucketName => this.createOne(bucketName)
bucketName => this.createOne(bucketName),
);

return Promise.all(promises);

@ -57,7 +57,7 @@ class BucketUtility {

deleteMany(bucketNames) {
const promises = bucketNames.map(
bucketName => this.deleteOne(bucketName)
bucketName => this.deleteOne(bucketName),
);

return Promise.all(promises);

@ -87,7 +87,7 @@ class BucketUtility {
Key: object.Key,
VersionId: object.VersionId,
}).promise()
.then(() => object)
.then(() => object),
)
.concat(data.Versions
.filter(object => object.Key.endsWith('/'))

@ -98,8 +98,8 @@ class BucketUtility {
Key: object.Key,
VersionId: object.VersionId,
}).promise()
.then(() => object)
.then(() => object),
)
),
)
.concat(data.DeleteMarkers
.map(object =>

@ -108,14 +108,14 @@ class BucketUtility {
Key: object.Key,
VersionId: object.VersionId,
}).promise()
.then(() => object)))
.then(() => object))),
)
),
);
}

emptyMany(bucketNames) {
const promises = bucketNames.map(
bucketName => this.empty(bucketName)
bucketName => this.empty(bucketName),
);

return Promise.all(promises);

@ -179,7 +179,7 @@ withV4(sigCfg => {
assert.notStrictEqual(err, null);
assert.strictEqual(
err.statusCode,
errors.AccessDenied.code
errors.AccessDenied.code,
);
}
done();

@ -81,7 +81,7 @@ describe('aws-node-sdk test deleteBucketReplication', () => {
}),
next => deleteReplicationAndCheckResponse(bucket, next),
next => s3.getBucketReplication({ Bucket: bucket }, err => {
assert(errors.ReplicationConfigurationNotFoundError[err.code]);
assert(errors.ReplicationConfigurationNotFoundError.is[err.code]);
return next();
}),
], done));

@ -45,7 +45,7 @@ describe('aws-node-sdk test getBucketReplication', () => {
it("should return 'ReplicationConfigurationNotFoundError' if bucket does " +
'not have a replication configuration', done =>
s3.getBucketReplication({ Bucket: bucket }, err => {
assert(errors.ReplicationConfigurationNotFoundError[err.code]);
assert(errors.ReplicationConfigurationNotFoundError.is[err.code]);
return done();
}));

@ -137,21 +137,21 @@ describe('PUT Bucket - AWS.S3.createBucket', () => {
done => {
const longName = 'x'.repeat(64);
testFn(longName, done);
}
},
);

itSkipIfAWS('should return 400 if name is formatted as IP address',
done => {
const ipAddress = '192.168.5.4';
testFn(ipAddress, done);
}
},
);

itSkipIfAWS('should return 400 if name starts with period',
done => {
const invalidName = '.myawsbucket';
testFn(invalidName, done);
}
},
);

it('should return 400 if name ends with period', done => {

@ -164,7 +164,7 @@ describe('PUT Bucket - AWS.S3.createBucket', () => {
done => {
const invalidName = 'my..examplebucket';
testFn(invalidName, done);
}
},
);

it('should return 400 if name has special chars', done => {

@ -36,6 +36,6 @@ describe('aws-node-sdk stress test bucket', function testSuite() {
next => putObjects(s3, loopId, err => next(err)),
next => deleteObjects(s3, loopId, err => next(err)),
next => s3.deleteBucket({ Bucket: bucket }, err => next(err)),
], err => next(err)), done)
], err => next(err)), done),
);
});

@ -196,7 +196,7 @@ function testSuite() {
`in afterEach: ${err}\n`);
throw err;
}
})
}),
);

[{

@ -45,7 +45,7 @@ describe('Abort MPU', () => {
UploadId: uploadId,
}).promise()
.then(() => bucketUtil.empty(bucket))
.then(() => bucketUtil.deleteOne(bucket))
.then(() => bucketUtil.deleteOne(bucket)),
);

// aws-sdk now (v2.363.0) returns 'UriParameterError' error

@ -46,7 +46,8 @@ describe('Tag condition keys updateRequestContext', () => {
assert.ifError(err);
assert(newRequestContexts[0].getNeedTagEval());
assert.strictEqual(newRequestContexts[0].getExistingObjTag(), tagsToExist);
assert.strictEqual(newRequestContexts[0].getRequestObjTags(), makeTagQuery(taggingUtil.getTags()));
assert.strictEqual(
newRequestContexts[0].getRequestObjTags(), makeTagQuery(taggingUtil.getTags()));
done();
});
});

@ -84,7 +84,7 @@ describe('large mpu', function tester() {
process.stdout.write('putting parts');
return timesLimit(partCount, 20, (n, cb) =>
uploadPart(n, uploadId, s3, cb), err =>
next(err)
next(err),
);
},
next => {

@ -99,7 +99,7 @@ describe('Complete MPU', () => {
.then(result => {
uploadId = result.uploadId;
eTag = result.eTag;
})
}),
);

it('should complete an MPU with fewer parts than were ' +

@ -118,7 +118,7 @@ describe('Complete MPU', () => {
.then(result => {
uploadId = result.uploadId;
eTag = result.eTag;
})
}),
);

it('should complete an MPU with fewer parts than were ' +

@ -137,7 +137,7 @@ describe('Complete MPU', () => {
.then(result => {
uploadId = result.uploadId;
eTag = result.eTag;
})
}),
);

it('should complete an MPU with fewer parts than were ' +

@ -219,7 +219,7 @@ describe('Complete MPU', () => {
.then(result => {
uploadId = result.uploadId;
eTag = result.eTag;
})
}),
);

it('should complete the MPU successfully and leave a readable object', done => {

@ -49,7 +49,7 @@ describe('Object Part Copy', () => {
process.stdout.write(`Error creating source bucket: ${err}\n`);
throw err;
}).then(() =>
s3.createBucketPromise({ Bucket: destBucketName })
s3.createBucketPromise({ Bucket: destBucketName }),
).catch(err => {
process.stdout.write(`Error creating dest bucket: ${err}\n`);
throw err;

@ -92,7 +92,7 @@ describe('Object Part Copy', () => {
}
})
.then(() => bucketUtil.deleteMany([sourceBucketName,
destBucketName]))
destBucketName])),
);


@ -751,7 +751,7 @@ describe('Object Part Copy', () => {
throw err;
}
}).then(() => otherAccountBucketUtility
.deleteOne(otherAccountBucket))
.deleteOne(otherAccountBucket)),
);

it('should not allow an account without read persmission on the ' +

@ -436,7 +436,7 @@ describe('Cross Origin Resource Sharing requests', () => {
assert.strictEqual(err, null,
`Unexpected err ${err} in beforeEach`);
done(err);
})
}),
);

afterEach(done =>

@ -450,7 +450,7 @@ describe('Cross Origin Resource Sharing requests', () => {
}
return _waitForAWS(done);
});
})
}),
);

it('should respond with CORS headers at website endpoint (GET)',

@ -61,7 +61,7 @@ describe('DELETE multipart', () => {
.catch(err => {
process.stdout.write(`Error in beforeEach: ${err}\n`);
throw err;
})
}),
);

afterEach(() => {

@ -101,7 +101,7 @@ describe('DELETE multipart', () => {
PartNumber: 1,
UploadId: uploadId,
});
})
}),
);

it('should return 204 for abortMultipartUpload', done => {

@ -39,7 +39,7 @@ describe('DELETE object', () => {
PartNumber: i,
Body: testfile,
UploadId: uploadId,
}).promise()
}).promise(),
);
}
return Promise.all(uploads);

@ -71,8 +71,8 @@ function hydrateSSEConfig({ algo: SSEAlgorithm, masterKeyId: KMSMasterKeyID }) {
},
},
],
}
},
)
),
);
}

@ -92,7 +92,7 @@ describe('per object encryption headers', () => {
assert.ifError(err);
kmsKeyId = keyId;
done();
}
},
);
});

@ -137,7 +137,7 @@ describe('per object encryption headers', () => {
}
assert.deepStrictEqual(sseConfig, expected);
done();
}
},
);
}));

@ -161,9 +161,9 @@ describe('per object encryption headers', () => {
}
res.forEach(sseConfig => assert.deepStrictEqual(sseConfig, expected));
done();
}
},
);
}
},
));

testCases

@ -202,7 +202,7 @@ describe('per object encryption headers', () => {
}
assert.deepStrictEqual(sseConfig, expected);
done();
}
},
);
});
});

@ -258,7 +258,7 @@ describe('per object encryption headers', () => {
}
assert.deepStrictEqual(sseConfig, expected);
done();
}
},
);
});
});

@ -74,7 +74,7 @@ describe('GET object', () => {
const md5HashExpected = crypto.createHash('md5');
assert.strictEqual(
md5Hash.update(data.Body).digest('hex'),
md5HashExpected.update(body).digest('hex')
md5HashExpected.update(body).digest('hex'),
);
return cb();
});

@ -770,7 +770,7 @@ describe('GET object', () => {
const expected = Buffer.alloc(partSize).fill(num);
assert.strictEqual(
md5Hash.update(data.Body).digest('hex'),
md5HashExpected.update(expected).digest('hex')
md5HashExpected.update(expected).digest('hex'),
);
return done();
});

@ -790,7 +790,7 @@ describe('GET object', () => {
.fill(unOrderedPartNumbers[num - 1]);
assert.strictEqual(
md5Hash.update(data.Body).digest('hex'),
md5HashExpected.update(expected).digest('hex')
md5HashExpected.update(expected).digest('hex'),
);
return done();
});

@ -829,7 +829,7 @@ describe('GET object', () => {
const expected = Buffer.alloc(10);
assert.strictEqual(
md5Hash.update(data.Body).digest('hex'),
md5HashExpected.update(expected).digest('hex')
md5HashExpected.update(expected).digest('hex'),
);
done();
});

@ -849,7 +849,7 @@ describe('GET object', () => {
const expected = Buffer.alloc(10);
assert.strictEqual(
md5Hash.update(data.Body).digest('hex'),
md5HashExpected.update(expected).digest('hex')
md5HashExpected.update(expected).digest('hex'),
);
done();
});

@ -12,6 +12,10 @@ const bucket = 'mock-bucket-lock';
const unlockedBucket = 'mock-bucket-no-lock';
const key = 'mock-object-legalhold';
const keyNoHold = 'mock-object-no-legalhold';
const nonExistingId = process.env.AWS_ON_AIR ?
'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' :
'3939393939393939393936493939393939393939756e6437';

describe('GET object legal hold', () => {
withV4(sigCfg => {

@ -86,7 +90,7 @@ describe('GET object legal hold', () => {
s3.getObjectLegalHold({
Bucket: bucket,
Key: key,
VersionId: '000000000000',
VersionId: nonExistingId,
}, err => {
checkError(err, 'NoSuchVersion', 404);
done();

@ -117,7 +117,7 @@ describe('Part size tests with object head', () => {
partNumbers.forEach(part => {
it(`should return the size of part ${part + 1} ` +
`when --part-number is set to ${part + 1}`, done => {
const partNumber = Number.parseInt(part, 0) + 1;
const partNumber = Number.parseInt(part, 10) + 1;
const partSize = bodySize + partNumber;

s3.headObject({ Bucket: bucket, Key: object, PartNumber: partNumber }, (err, data) => {
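The radix change above is a behaviour fix rather than a style tweak: Number.parseInt(value, 0) auto-detects the base, so hex-looking input silently changes meaning, while an explicit radix of 10 always parses decimal. For example:

    // With radix 0 the base is auto-detected; with 10 it is always decimal.
    console.log(Number.parseInt('0x10', 0));   // 16 (detected as hex)
    console.log(Number.parseInt('0x10', 10));  // 0  (decimal parse stops at 'x')
    console.log(Number.parseInt('08', 10));    // 8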
@ -60,13 +60,13 @@ describe('aws-node-sdk range test of large end position', () => {
|
||||||
|
|
||||||
it('should get the final 90 bytes of a 2890 byte object for a byte ' +
|
it('should get the final 90 bytes of a 2890 byte object for a byte ' +
|
||||||
'range of 2800-',
|
'range of 2800-',
|
||||||
done => endRangeTest('bytes=2800-', 'bytes 2800-2889/2890', done)
|
done => endRangeTest('bytes=2800-', 'bytes 2800-2889/2890', done),
|
||||||
);
|
);
|
||||||
|
|
||||||
it('should get the final 90 bytes of a 2890 byte object for a byte ' +
|
it('should get the final 90 bytes of a 2890 byte object for a byte ' +
|
||||||
'range of 2800-Number.MAX_SAFE_INTEGER',
|
'range of 2800-Number.MAX_SAFE_INTEGER',
|
||||||
done => endRangeTest(`bytes=2800-${Number.MAX_SAFE_INTEGER}`,
|
done => endRangeTest(`bytes=2800-${Number.MAX_SAFE_INTEGER}`,
|
||||||
'bytes 2800-2889/2890', done)
|
'bytes 2800-2889/2890', done),
|
||||||
);
|
);
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
|
|
|
@ -13,6 +13,9 @@ const bucketName = 'lockenabledbucket';
|
||||||
const unlockedBucket = 'locknotenabledbucket';
|
const unlockedBucket = 'locknotenabledbucket';
|
||||||
const objectName = 'putobjectretentionobject';
|
const objectName = 'putobjectretentionobject';
|
||||||
const noRetentionObject = 'objectwithnoretention';
|
const noRetentionObject = 'objectwithnoretention';
|
||||||
|
const nonExistingId = process.env.AWS_ON_AIR ?
|
||||||
|
'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' :
|
||||||
|
'3939393939393939393936493939393939393939756e6437';
|
||||||
|
|
||||||
const retainDate = moment().add(1, 'days').toISOString();
|
const retainDate = moment().add(1, 'days').toISOString();
|
||||||
|
|
||||||
|
@ -106,7 +109,7 @@ describe('GET object retention', () => {
|
||||||
s3.getObjectRetention({
|
s3.getObjectRetention({
|
||||||
Bucket: bucketName,
|
Bucket: bucketName,
|
||||||
Key: objectName,
|
Key: objectName,
|
||||||
VersionId: '000000000000',
|
VersionId: nonExistingId,
|
||||||
}, err => {
|
}, err => {
|
||||||
checkError(err, 'NoSuchVersion', 404);
|
checkError(err, 'NoSuchVersion', 404);
|
||||||
done();
|
done();
|
||||||
|
|
|
@ -130,7 +130,7 @@ describe('List parts - object keys with special characters: `&`', () => {
             .then(res => {
                 uploadId = res;
                 return Promise.resolve();
-            })
+            }),
         );
 
         afterEach(() => deletePart(s3, bucketUtil, key, uploadId));

@ -152,7 +152,7 @@ describe('List parts - object keys with special characters: `"`', () => {
             .then(res => {
                 uploadId = res;
                 return Promise.resolve();
-            })
+            }),
         );
 
         afterEach(() => deletePart(s3, bucketUtil, key, uploadId));

@ -174,7 +174,7 @@ describe('List parts - object keys with special characters: `\'`', () => {
             .then(res => {
                 uploadId = res;
                 return Promise.resolve();
-            })
+            }),
         );
 
         afterEach(() => deletePart(s3, bucketUtil, key, uploadId));

@ -196,7 +196,7 @@ describe('List parts - object keys with special characters: `<`', () => {
             .then(res => {
                 uploadId = res;
                 return Promise.resolve();
-            })
+            }),
         );
 
         afterEach(() => deletePart(s3, bucketUtil, key, uploadId));

@ -218,7 +218,7 @@ describe('List parts - object keys with special characters: `>`', () => {
             .then(res => {
                 uploadId = res;
                 return Promise.resolve();
-            })
+            }),
         );
 
         afterEach(() => deletePart(s3, bucketUtil, key, uploadId));
@ -108,12 +108,12 @@ describe('aws-node-sdk test suite of listMultipartUploads', () =>
             UploadId: data.uploadId,
         }).promise()
             .then(() => bucketUtil.empty(bucket))
-            .then(() => bucketUtil.deleteOne(bucket))
+            .then(() => bucketUtil.deleteOne(bucket)),
         );
 
         it('should list ongoing multipart uploads', () =>
             s3.listMultipartUploads({ Bucket: bucket }).promise()
-                .then(res => checkValues(res, data))
+                .then(res => checkValues(res, data)),
         );
 
         it('should list ongoing multipart uploads with params', () => {

@ -139,5 +139,5 @@ describe('aws-node-sdk test suite of listMultipartUploads', () =>
             }).promise()
                 .then(res => checkValues(res, data));
         });
-    })
+    }),
     );
@ -80,7 +80,7 @@ describe('Multi-Object Delete Success', function success() {
                 Bucket: bucketName,
                 Key: key,
                 Body: 'somebody',
-            }).promise()
+            }).promise(),
             );
             queued.push(result);
             return result;
@ -79,10 +79,10 @@ describe('Object Copy', () => {
             s3 = bucketUtil.s3;
             return bucketUtil.empty(sourceBucketName)
             .then(() =>
-                bucketUtil.empty(destBucketName)
+                bucketUtil.empty(destBucketName),
             )
             .then(() =>
-                bucketUtil.deleteMany([sourceBucketName, destBucketName])
+                bucketUtil.deleteMany([sourceBucketName, destBucketName]),
             )
             .catch(err => {
                 if (err.code !== 'NoSuchBucket') {

@ -90,9 +90,9 @@ describe('Object Copy', () => {
                     throw err;
                 }
             })
-            .then(() => bucketUtil.createOne(sourceBucketName)
+            .then(() => bucketUtil.createOne(sourceBucketName),
             )
-            .then(() => bucketUtil.createOne(destBucketName)
+            .then(() => bucketUtil.createOne(destBucketName),
             )
             .catch(err => {
                 throw err;

@ -121,7 +121,7 @@ describe('Object Copy', () => {
         }));
 
         afterEach(() => bucketUtil.empty(sourceBucketName)
-            .then(() => bucketUtil.empty(destBucketName))
+            .then(() => bucketUtil.empty(destBucketName)),
         );
 
         after(() => bucketUtil.deleteMany([sourceBucketName, destBucketName]));

@ -179,7 +179,7 @@ describe('Object Copy', () => {
                 CopySource: `${sourceBucketName}/${sourceObjName}` },
                 (err, res) =>
                     successCopyCheck(err, res, originalMetadata,
-                        destBucketName, destObjName, done)
+                        destBucketName, destObjName, done),
             );
         });
 

@ -330,7 +330,7 @@ describe('Object Copy', () => {
                     // Should remove V4 streaming value 'aws-chunked'
                     // to be compatible with AWS behavior
                     assert.strictEqual(res.ContentEncoding,
-                        'base64,'
+                        'base64,',
                     );
                     assert.strictEqual(res.Expires.toGMTString(),
                         originalExpires.toGMTString());

@ -346,7 +346,7 @@ describe('Object Copy', () => {
                 CopySource: `${sourceBucketName}/${sourceObjName}` },
                 (err, res) =>
                     successCopyCheck(err, res, originalMetadata,
-                        sourceBucketName, destObjName, done)
+                        sourceBucketName, destObjName, done),
             );
         });
 

@ -406,7 +406,7 @@ describe('Object Copy', () => {
                 Metadata: newMetadata },
                 (err, res) =>
                     successCopyCheck(err, res, newMetadata,
-                        sourceBucketName, sourceObjName, done)
+                        sourceBucketName, sourceObjName, done),
             );
         });
 

@ -419,7 +419,7 @@ describe('Object Copy', () => {
                 },
                 (err, res) =>
                     successCopyCheck(err, res, newMetadata,
-                        destBucketName, destObjName, done)
+                        destBucketName, destObjName, done),
             );
         });
 

@ -824,12 +824,12 @@ describe('Object Copy', () => {
         const otherAccountBucket = 'otheraccountbucket42342342342';
         const otherAccountKey = 'key';
         beforeEach(() => otherAccountBucketUtility
-            .createOne(otherAccountBucket)
+            .createOne(otherAccountBucket),
         );
 
         afterEach(() => otherAccountBucketUtility.empty(otherAccountBucket)
             .then(() => otherAccountBucketUtility
-            .deleteOne(otherAccountBucket))
+            .deleteOne(otherAccountBucket)),
         );
 
         it('should not allow an account without read persmission on the ' +
@ -46,7 +46,7 @@ describe('HEAD object, conditions', () => {
             bucketUtil = new BucketUtility('default', sigCfg);
             s3 = bucketUtil.s3;
             return bucketUtil.empty(bucketName).then(() =>
-                bucketUtil.deleteOne(bucketName)
+                bucketUtil.deleteOne(bucketName),
             )
             .catch(err => {
                 if (err.code !== 'NoSuchBucket') {

@ -22,7 +22,7 @@ describe('HEAD object, compatibility headers [Cache-Control, ' +
             bucketUtil = new BucketUtility('default', sigCfg);
             s3 = bucketUtil.s3;
             return bucketUtil.empty(bucketName).then(() =>
-                bucketUtil.deleteOne(bucketName)
+                bucketUtil.deleteOne(bucketName),
             )
             .catch(err => {
                 if (err.code !== 'NoSuchBucket') {
@ -38,7 +38,7 @@ describe('Put object with same key as prior object', () => {
             Body: 'I am the best content ever',
             Metadata: firstPutMetadata,
         }).promise().then(() =>
-            s3.headObject({ Bucket: bucketName, Key: objectName }).promise()
+            s3.headObject({ Bucket: bucketName, Key: objectName }).promise(),
         ).then(res => {
             assert.deepStrictEqual(res.Metadata, firstPutMetadata);
         }));

@ -54,7 +54,7 @@ describe('Put object with same key as prior object', () => {
             Body: 'Much different',
             Metadata: secondPutMetadata,
         }).promise().then(() =>
-            s3.getObject({ Bucket: bucketName, Key: objectName }).promise()
+            s3.getObject({ Bucket: bucketName, Key: objectName }).promise(),
         ).then(res => {
             assert.deepStrictEqual(res.Metadata, secondPutMetadata);
             assert.deepStrictEqual(res.Body.toString(),
@ -8,6 +8,9 @@ const changeObjectLock = require('../../../../utilities/objectLock-util');
 const bucket = 'mock-bucket-lock';
 const unlockedBucket = 'mock-bucket-no-lock';
 const key = 'mock-object';
+const nonExistingId = process.env.AWS_ON_AIR ?
+    'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' :
+    '3939393939393939393936493939393939393939756e6437';
 
 const mockLegalHold = {
     empty: {},

@ -98,7 +101,7 @@ describe('PUT object legal hold', () => {
             s3.putObjectLegalHold({
                 Bucket: bucket,
                 Key: key,
-                VersionId: '000000000000',
+                VersionId: nonExistingId,
                 LegalHold: mockLegalHold.on,
             }, err => {
                 checkError(err, 'NoSuchVersion', 404);
@ -9,6 +9,9 @@ const changeObjectLock = require('../../../../utilities/objectLock-util');
 const bucketName = 'lockenabledputbucket';
 const unlockedBucket = 'locknotenabledputbucket';
 const objectName = 'putobjectretentionobject';
+const nonExistingId = process.env.AWS_ON_AIR ?
+    'MhhyTHhmZ4cxSi4Y9SMe5P7UJAz7HLJ9' :
+    '3939393939393939393936493939393939393939756e6437';
 
 const retentionConfig = {
     Mode: 'GOVERNANCE',

@ -79,7 +82,7 @@ describe('PUT object retention', () => {
             s3.putObjectRetention({
                 Bucket: bucketName,
                 Key: objectName,
-                VersionId: '000000000000',
+                VersionId: nonExistingId,
                 Retention: retentionConfig,
             }, err => {
                 checkError(err, 'NoSuchVersion', 404);
@ -74,7 +74,7 @@ function uploadParts(bytes, uploadId) {
             PartNumber: part,
             UploadId: uploadId,
             Body: createReadStream(`${name}.mpuPart${part}`),
-        }).promise())
+        }).promise()),
     );
 }
 

@ -123,7 +123,7 @@ describe('aws-node-sdk range tests', () => {
                     },
                 ],
             },
-        }).promise())
+        }).promise()),
         );
 
         afterEach(() => bucketUtil.empty(bucket)

@ -139,7 +139,7 @@ describe('aws-node-sdk range tests', () => {
                 resolve();
             }))
             .then(() => bucketUtil.deleteOne(bucket))
-            .then(() => execAsync(`rm hashedFile.${fileSize}*`))
+            .then(() => execAsync(`rm hashedFile.${fileSize}*`)),
         );
 
         it('should get a range from the first part of an object', () =>
@ -64,8 +64,8 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() {
                 accessKeyId: 'wrong',
                 secretAccessKey: 'wrong again',
             },
-            sigCfg
-        )
+            sigCfg,
+        ),
         );
         const expectedCode = 'InvalidAccessKeyId';
         const expectedStatus = 403;

@ -165,7 +165,7 @@ describeFn('GET Service - AWS.S3.listBuckets', function getService() {
             })
             .then(data => {
                 const buckets = data.Buckets.filter(bucket =>
-                    createdBuckets.indexOf(bucket.Name) > -1
+                    createdBuckets.indexOf(bucket.Name) > -1,
                 );
 
                 assert.equal(buckets.length, createdBuckets.length,
@ -117,7 +117,7 @@ describe('Object Version Copy', () => {
             lastModified = res.LastModified;
         }).then(() => s3.putObject({ Bucket: sourceBucketName,
             Key: sourceObjName,
-            Body: secondContent }).promise())
+            Body: secondContent }).promise()),
         );
 
         afterEach(done => async.parallel([

@ -257,7 +257,7 @@ describe('Object Version Copy', () => {
                 CopySource: copySource },
                 (err, res) =>
                     successCopyCheck(err, res, originalMetadata,
-                        destBucketName, destObjName, done)
+                        destBucketName, destObjName, done),
             );
         });
 

@ -281,7 +281,7 @@ describe('Object Version Copy', () => {
                     // Should remove V4 streaming value 'aws-chunked'
                     // to be compatible with AWS behavior
                     assert.strictEqual(res.ContentEncoding,
-                        'base64,'
+                        'base64,',
                     );
                     assert.strictEqual(res.Expires.toGMTString(),
                         originalExpires.toGMTString());

@ -297,7 +297,7 @@ describe('Object Version Copy', () => {
                 CopySource: copySource },
                 (err, res) =>
                     successCopyCheck(err, res, originalMetadata,
-                        sourceBucketName, destObjName, done)
+                        sourceBucketName, destObjName, done),
             );
         });
 

@ -309,7 +309,7 @@ describe('Object Version Copy', () => {
                 Metadata: newMetadata },
                 (err, res) =>
                     successCopyCheck(err, res, newMetadata,
-                        sourceBucketName, sourceObjName, done)
+                        sourceBucketName, sourceObjName, done),
             );
         });
 

@ -322,7 +322,7 @@ describe('Object Version Copy', () => {
                 },
                 (err, res) =>
                     successCopyCheck(err, res, newMetadata,
-                        destBucketName, destObjName, done)
+                        destBucketName, destObjName, done),
             );
         });
 

@ -637,7 +637,7 @@ describe('Object Version Copy', () => {
                 CopySource: copySource },
                 (err, res) =>
                     successCopyCheck(err, res, originalMetadata,
-                        sourceBucketName, sourceObjName, done)
+                        sourceBucketName, sourceObjName, done),
             );
         });
 

@ -749,12 +749,12 @@ describe('Object Version Copy', () => {
         const otherAccountBucket = 'otheraccountbucket42342342342';
         const otherAccountKey = 'key';
         beforeEach(() => otherAccountBucketUtility
-            .createOne(otherAccountBucket)
+            .createOne(otherAccountBucket),
         );
 
         afterEach(() => otherAccountBucketUtility.empty(otherAccountBucket)
             .then(() => otherAccountBucketUtility
-            .deleteOne(otherAccountBucket))
+            .deleteOne(otherAccountBucket)),
         );
 
         it('should not allow an account without read persmission on the ' +
@ -127,6 +127,6 @@ describe('Healthcheck stats', () => {
                 assert.deepStrictEqual(JSON.parse(res), expectedStatsRes);
                 return done();
             });
-        }, 500)
+        }, 500),
         );
     });
@ -1,6 +1,5 @@
 'use strict'; // eslint-disable-line strict
 const assert = require('assert');
-const { errors } = require('arsenal');
 const DummyRequestLogger = require('../unit/helpers').DummyRequestLogger;
 const clientCheck
     = require('../../lib/utilities/healthcheckHandler').clientCheck;

@ -71,7 +70,7 @@ describe('Healthcheck response', () => {
                 const azureLocationNonExistContainerError =
                     results[azureLocationNonExistContainer].error;
                 if (err) {
-                    assert.strictEqual(err, errors.InternalError,
+                    assert(err.is.InternalError,
                         `got unexpected err in clientCheck: ${err}`);
                     assert(azureLocationNonExistContainerError.startsWith(
                         'The specified container is being deleted.'));
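Note on the assertion change just above, which recurs through the rest of this compare: instead of comparing the callback error against a whole arsenal error object (strict or deep equality with errors.InternalError, errors.NoSuchBucket, and so on), the tests now check the per-type boolean the error exposes, err.is.<Type>. A hedged sketch of the pattern as these hunks use it; the expectError helper is illustrative only and not part of the codebase:

    const assert = require('assert');

    // Old style: identity or deep comparison against the exported error value.
    //     assert.deepStrictEqual(err, errors.NoSuchBucket);
    // New style: ask the error itself which type it is.
    function expectError(err, type) {
        assert.strictEqual(err.is[type], true, `expected ${type}, got ${err}`);
    }

    // e.g. expectError(err, 'NoSuchBucket');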
@ -2,7 +2,7 @@ const assert = require('assert');
 const async = require('async');
 const AWS = require('aws-sdk');
 const { parseString } = require('xml2js');
-const { errors, models } = require('arsenal');
+const { models } = require('arsenal');
 
 const BucketInfo = models.BucketInfo;
 const { getRealAwsConfig } =

@ -460,11 +460,12 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
             abortMPU(uploadId, getAwsParams(objectKey), () => {
                 const listParams = getListParams(objectKey, uploadId);
                 listParts(authInfo, listParams, log, err => {
-                    assert.deepStrictEqual(err, errors.ServiceUnavailable
-                        .customizeDescription('Error returned from AWS: ' +
-                        'The specified upload does not exist. The upload ID ' +
-                        'may be invalid, or the upload may have been aborted' +
-                        'or completed.'));
+                    const wantedDesc = 'Error returned from AWS: ' +
+                        'The specified upload does not exist. The upload ID ' +
+                        'may be invalid, or the upload may have been aborted' +
+                        ' or completed.';
+                    assert.strictEqual(err.is.ServiceUnavailable, true);
+                    assert.deepStrictEqual(err.description, wantedDesc);
                     done();
                 });
             });

@ -513,7 +514,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
             const fakeKey = `key-${Date.now()}`;
             const delParams = getDeleteParams(fakeKey, fakeUploadId);
             multipartDelete(authInfo, delParams, log, err => {
-                assert.equal(err, errors.NoSuchUpload,
+                assert.strictEqual(err.is.NoSuchUpload, true,
                     `Error aborting MPU: ${err}`);
                 done();
             });

@ -639,7 +640,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
             const compParams = getCompleteParams(objectKey, uploadId);
             compParams.post = errorBody;
             completeMultipartUpload(authInfo, compParams, log, err => {
-                assert.deepStrictEqual(err, errors.InvalidPart);
+                assert.strictEqual(err.is.InvalidPart, true);
                 done();
             });
         });

@ -661,7 +662,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
             const compParams = getCompleteParams(objectKey, uploadId);
             compParams.post = errorBody;
             completeMultipartUpload(authInfo, compParams, log, err => {
-                assert.deepStrictEqual(err, errors.InvalidPartOrder);
+                assert.strictEqual(err.is.InvalidPartOrder, true);
                 done();
             });
         });

@ -687,7 +688,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
             const compParams = getCompleteParams(objectKey, uploadId);
             compParams.post = errorBody;
             completeMultipartUpload(authInfo, compParams, log, err => {
-                assert.deepStrictEqual(err, errors.EntityTooSmall);
+                assert.strictEqual(err.is.EntityTooSmall, true);
                 done();
             });
         });

@ -825,7 +826,7 @@ describe('Multipart Upload API with AWS Backend', function mpuTestSuite() {
             (uploadId, next) => {
                 const listParams = getListParams(objectKey, uploadId);
                 listParts(authInfo, listParams, log, err => {
-                    assert(err.NoSuchUpload);
+                    assert.strictEqual(err.is.NoSuchUpload, true);
                     next();
                 });
             },
@ -58,7 +58,7 @@ function put(bucketLoc, objLoc, requestHost, cb, errorDescription) {
         resHeaders) => {
         if (errorDescription) {
             assert.strictEqual(err.code, 400);
-            assert(err.InvalidArgument);
+            assert(err.is.InvalidArgument);
             assert(err.description.indexOf(errorDescription) > -1);
         } else {
             assert.strictEqual(err, null, `Error putting object: ${err}`);
@ -2,6 +2,7 @@ const assert = require('assert');
 const async = require('async');
 const { parseString } = require('xml2js');
 const AWS = require('aws-sdk');
+const { errors } = require('arsenal');
 
 const { cleanup, DummyRequestLogger, makeAuthInfo }
     = require('../unit/helpers');

@ -147,8 +148,7 @@ errorPutCopyPart) {
     return objectPutCopyPart(authInfo, copyPartReq,
         bucketName, sourceObjName, undefined, log, (err, copyResult) => {
             if (errorPutCopyPart) {
-                assert.strictEqual(err.code, errorPutCopyPart.statusCode);
-                assert(err[errorPutCopyPart.code]);
+                assert.strictEqual(err.is[errorPutCopyPart.type], true);
                 return cb();
             }
             assert.strictEqual(err, null);

@ -293,9 +293,8 @@ function testSuite() {
     it('should return error 403 AccessDenied copying part to a ' +
     'different AWS location without object READ access',
         done => {
-            const errorPutCopyPart = { code: 'AccessDenied', statusCode: 403 };
             copyPutPart(null, awsLocation, awsLocation2, 'localhost', done,
-                errorPutCopyPart);
+                errors.AccessDenied);
         });
 
 
@ -99,7 +99,7 @@ errorDescription) {
         (err, json) => {
             if (errorDescription) {
                 assert.strictEqual(err.code, 400);
-                assert(err.InvalidArgument);
+                assert(err.is.InvalidArgument);
                 assert(err.description.indexOf(errorDescription) > -1);
                 return cb();
             }
@ -37,7 +37,7 @@ describe('objectLockHelpers: validateHeaders', () => {
             = validateHeaders(objLockDisabledBucketInfo, headers, log);
         const expectedError = errors.InvalidRequest.customizeDescription(
             'Bucket is missing ObjectLockConfiguration');
-        assert.strictEqual(objectLockValidationError.InvalidRequest, true);
+        assert.strictEqual(objectLockValidationError.is.InvalidRequest, true);
         assert.strictEqual(objectLockValidationError.description,
             expectedError.description);
     });

@ -90,7 +90,7 @@ describe('objectLockHelpers: validateHeaders', () => {
         const expectedError = errors.InvalidArgument.customizeDescription(
             'x-amz-object-lock-retain-until-date and x-amz-object-lock-mode ' +
             'must both be supplied');
-        assert.strictEqual(objectLockValidationError.InvalidArgument, true);
+        assert.strictEqual(objectLockValidationError.is.InvalidArgument, true);
         assert.strictEqual(objectLockValidationError.description,
             expectedError.description);
     });

@ -104,7 +104,7 @@ describe('objectLockHelpers: validateHeaders', () => {
         const expectedError = errors.InvalidArgument.customizeDescription(
             'x-amz-object-lock-retain-until-date and x-amz-object-lock-mode ' +
             'must both be supplied');
-        assert.strictEqual(objectLockValidationError.InvalidArgument, true);
+        assert.strictEqual(objectLockValidationError.is.InvalidArgument, true);
         assert.strictEqual(objectLockValidationError.description,
             expectedError.description);
     });

@ -118,7 +118,7 @@ describe('objectLockHelpers: validateHeaders', () => {
             'The retain until date must be in the future!');
         const objectLockValidationError
             = validateHeaders(bucketInfo, headers, log);
-        assert.strictEqual(objectLockValidationError.InvalidArgument, true);
+        assert.strictEqual(objectLockValidationError.is.InvalidArgument, true);
         assert.strictEqual(objectLockValidationError.description,
             expectedError.description);
     });

@ -131,7 +131,7 @@ describe('objectLockHelpers: validateHeaders', () => {
             = validateHeaders(bucketInfo, headers, log);
         const expectedError = errors.InvalidArgument.customizeDescription(
             'Legal hold status must be one of "ON", "OFF"');
-        assert.strictEqual(objectLockValidationError.InvalidArgument, true);
+        assert.strictEqual(objectLockValidationError.is.InvalidArgument, true);
         assert.strictEqual(objectLockValidationError.description,
             expectedError.description);
     });

@ -145,7 +145,7 @@ describe('objectLockHelpers: validateHeaders', () => {
             = validateHeaders(bucketInfo, headers, log);
         const expectedError = errors.InvalidArgument.customizeDescription(
             'Unknown wormMode directive');
-        assert.strictEqual(objectLockValidationError.InvalidArgument, true);
+        assert.strictEqual(objectLockValidationError.is.InvalidArgument, true);
         assert.strictEqual(objectLockValidationError.description,
             expectedError.description);
     });
@ -207,7 +207,7 @@ describe('objectLockHelpers: validateObjectLockUpdate', () => {
         };
 
         const error = validateObjectLockUpdate(objMD, retentionInfo, false);
-        assert.deepStrictEqual(error, errors.AccessDenied);
+        assert.strictEqual(error.is.AccessDenied, true);
     });
 
     it('should disallow COMPLIANCE => GOVERNANCE if retention is not expired', () => {

@ -222,7 +222,7 @@ describe('objectLockHelpers: validateObjectLockUpdate', () => {
         };
 
         const error = validateObjectLockUpdate(objMD, retentionInfo);
-        assert.deepStrictEqual(error, errors.AccessDenied);
+        assert.strictEqual(error.is.AccessDenied, true);
     });
 
     it('should allow COMPLIANCE => GOVERNANCE if retention is expired', () => {

@ -267,7 +267,7 @@ describe('objectLockHelpers: validateObjectLockUpdate', () => {
         };
 
         const error = validateObjectLockUpdate(objMD, retentionInfo);
-        assert.deepStrictEqual(error, errors.AccessDenied);
+        assert.strictEqual(error.is.AccessDenied, true);
     });
 
     it('should allow shortening retention period if in GOVERNANCE', () => {
@ -251,8 +251,8 @@ describe('versioning helpers', () => {
                 // result
                 const res = JSON.parse(
                     JSON.stringify(
-                        processVersioningState(mst, versioningStatus)
-                    )
+                        processVersioningState(mst, versioningStatus),
+                    ),
                 );
                 const expectedRes =
                     testCase[`versioning${versioningStatus}ExpectedRes`];

@ -425,7 +425,7 @@ describe('versioning helpers', () => {
                 'foobucket', mockBucketMD, testCase.objMD,
                 testCase.reqVersionId, null, (err, options) => {
                     if (testCase.expectedError) {
-                        assert.strictEqual(err, testCase.expectedError);
+                        assert.strictEqual(err.is[testCase.expectedError.type], true);
                     } else {
                         assert.ifError(err);
                         assert.deepStrictEqual(options, testCase.expectedRes);
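Note on the table-driven variant just above: when the expected error comes from test-case data instead of being hard-coded, the hunk reads the type name off the expected arsenal error (testCase.expectedError.type) and uses it as a computed key into the is map. A hedged sketch of the shape this assumes, where each test case stores an arsenal error such as errors.NoSuchVersion under expectedError:

    // The error's own type name drives the lookup, so the check stays data-driven.
    if (testCase.expectedError) {
        assert.strictEqual(err.is[testCase.expectedError.type], true);
    } else {
        assert.ifError(err);
        assert.deepStrictEqual(options, testCase.expectedRes);
    }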
@ -2,7 +2,6 @@ const crypto = require('crypto');
 const assert = require('assert');
 const async = require('async');
 const { parseString } = require('xml2js');
-const { errors } = require('arsenal');
 
 const bucketDelete = require('../../../lib/api/bucketDelete');
 const { bucketPut } = require('../../../lib/api/bucketPut');

@ -112,7 +111,7 @@ describe('bucketDelete API', () => {
         objectPut(authInfo, testPutObjectRequest, undefined, log, err => {
             assert.strictEqual(err, null);
             bucketDelete(authInfo, testRequest, log, err => {
-                assert.deepStrictEqual(err, errors.BucketNotEmpty);
+                assert.strictEqual(err.is.BucketNotEmpty, true);
                 metadata.getBucket(bucketName, log, (err, md) => {
                     assert.strictEqual(md.getName(), bucketName);
                     metadata.listObject(usersBucket,

@ -146,7 +145,7 @@ describe('bucketDelete API', () => {
         bucketPut(authInfo, testRequest, log, () => {
             bucketDelete(authInfo, testRequest, log, () => {
                 metadata.getBucket(bucketName, log, (err, md) => {
-                    assert.deepStrictEqual(err, errors.NoSuchBucket);
+                    assert.strictEqual(err.is.NoSuchBucket, true);
                     assert.strictEqual(md, undefined);
                     metadata.listObject(usersBucket, { prefix: canonicalID },
                         log, (err, listResponse) => {

@ -169,7 +168,7 @@ describe('bucketDelete API', () => {
     it('should prevent anonymous user delete bucket API access', done => {
         const publicAuthInfo = makeAuthInfo(constants.publicId);
         bucketDelete(publicAuthInfo, testRequest, log, err => {
-            assert.deepStrictEqual(err, errors.AccessDenied);
+            assert.strictEqual(err.is.AccessDenied, true);
             done();
         });
     });
@ -10,8 +10,6 @@ const objectPut = require('../../../lib/api/objectPut');
 const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers');
 const DummyRequest = require('../DummyRequest');
 
-const { errors } = require('arsenal');
-
 const authInfo = makeAuthInfo('accessKey1');
 const bucketName = 'bucketname';
 const delimiter = '/';

@ -114,7 +112,7 @@ const tests = [
             query: { 'max-keys': '1' },
             url: baseUrl,
         },
-        baseGetRequest
+        baseGetRequest,
         ),
         assertion: result => {
             assert.strictEqual(result.ListBucketResult.Contents[0].Key[0],

@ -122,7 +120,7 @@ const tests = [
             assert.strictEqual(result.ListBucketResult.Contents[1], undefined);
             assert.strictEqual(
                 result.ListBucketResult.NextContinuationToken[0],
-                'aW52YWxpZFVSSX5+fmI='
+                'aW52YWxpZFVSSX5+fmI=',
             );
         },
     },

@ -133,7 +131,7 @@ const tests = [
            query: { 'encoding-type': 'url', 'max-keys': '1' },
            url: baseUrl,
        },
-        baseGetRequest
+        baseGetRequest,
        ),
        assertion: result => {
            assert.strictEqual(result.ListBucketResult.Contents[0].Key[0],

@ -141,7 +139,7 @@ const tests = [
            assert.strictEqual(result.ListBucketResult.Contents[1], undefined);
            assert.strictEqual(
                result.ListBucketResult.NextContinuationToken[0],
-                'aW52YWxpZFVSSX5+fmI='
+                'aW52YWxpZFVSSX5+fmI=',
            );
        },
    },

@ -199,7 +197,7 @@ describe('bucketGet API', () => {
        const testGetRequest = Object.assign({ query: { 'max-keys': '-1' } },
            baseGetRequest);
        bucketGet(authInfo, testGetRequest, log, err => {
-            assert.deepStrictEqual(err, errors.InvalidArgument);
+            assert.strictEqual(err.is.InvalidArgument, true);
            done();
        });
    });
@ -28,7 +28,7 @@ describe('getBucketLifecycle API', () => {
         'bucket has no lifecycle', done => {
         const lifecycleRequest = getLifecycleRequest(bucketName);
         bucketGetLifecycle(authInfo, lifecycleRequest, log, err => {
-            assert.strictEqual(err.NoSuchLifecycleConfiguration, true);
+            assert.strictEqual(err.is.NoSuchLifecycleConfiguration, true);
             done();
         });
     });
@ -74,7 +74,7 @@ describe('bucketGetObjectLock API', () => {
         'object lock is not enabled on the bucket', done => {
         const objectLockRequest = getObjectLockConfigRequest(bucketName);
         bucketGetObjectLock(authInfo, objectLockRequest, log, err => {
-            assert.strictEqual(err.ObjectLockConfigurationNotFoundError, true);
+            assert.strictEqual(err.is.ObjectLockConfigurationNotFoundError, true);
             done();
         });
     });
@ -44,7 +44,7 @@ describe('getBucketPolicy API', () => {
     it('should return NoSuchBucketPolicy error if ' +
     'bucket has no policy', done => {
         bucketGetPolicy(authInfo, testBasicRequest, log, err => {
-            assert.strictEqual(err.NoSuchBucketPolicy, true);
+            assert.strictEqual(err.is.NoSuchBucketPolicy, true);
             done();
         });
     });
@ -1,5 +1,4 @@
 const assert = require('assert');
-const { errors } = require('arsenal');
 
 const bucketHead = require('../../../lib/api/bucketHead');
 const { bucketPut } = require('../../../lib/api/bucketPut');

@ -22,7 +21,7 @@ describe('bucketHead API', () => {
 
     it('should return an error if the bucket does not exist', done => {
         bucketHead(authInfo, testRequest, log, err => {
-            assert.deepStrictEqual(err, errors.NoSuchBucket);
+            assert.strictEqual(err.is.NoSuchBucket, true);
             done();
         });
     });

@ -31,7 +30,7 @@ describe('bucketHead API', () => {
         const otherAuthInfo = makeAuthInfo('accessKey2');
         bucketPut(otherAuthInfo, testRequest, log, () => {
             bucketHead(authInfo, testRequest, log, err => {
-                assert.deepStrictEqual(err, errors.AccessDenied);
+                assert.strictEqual(err.is.AccessDenied, true);
                 done();
             });
         });
@ -1,5 +1,4 @@
 const assert = require('assert');
-const { errors } = require('arsenal');
 
 const { checkLocationConstraint } = require('../../../lib/api/bucketPut');
 const { bucketPut } = require('../../../lib/api/bucketPut');

@ -83,8 +82,8 @@ describe('checkLocationConstraint function', () => {
             if (testCheck.isError) {
                 assert.notEqual(checkLocation.error, null,
                     'Expected failure but got success');
-                assert.strictEqual(checkLocation.error.
-                    InvalidLocationConstraint, true);
+                assert.strictEqual(
+                    checkLocation.error.is.InvalidLocationConstraint, true);
             } else {
                 assert.ifError(checkLocation.error);
                 assert.strictEqual(checkLocation.locationConstraint,

@ -105,7 +104,7 @@ describe('bucketPut API', () => {
         bucketPut(authInfo, testRequest, log, () => {
             bucketPut(otherAuthInfo, testRequest,
                 log, err => {
-                    assert.deepStrictEqual(err, errors.BucketAlreadyExists);
+                    assert.strictEqual(err.is.BucketAlreadyExists, true);
                     done();
                 });
         });

@ -188,9 +187,9 @@ describe('bucketPut API', () => {
             post: '',
         };
         bucketPut(authInfo, testRequest, log, err => {
-            assert.deepStrictEqual(err, errors.InvalidArgument);
+            assert.strictEqual(err.is.InvalidArgument, true);
             metadata.getBucket(bucketName, log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 done();
             });
         });

@ -209,9 +208,9 @@ describe('bucketPut API', () => {
             post: '',
         };
         bucketPut(authInfo, testRequest, log, err => {
-            assert.deepStrictEqual(err, errors.InvalidArgument);
+            assert.strictEqual(err.is.InvalidArgument, true);
             metadata.getBucket(bucketName, log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 done();
             });
         });

@ -231,9 +230,9 @@ describe('bucketPut API', () => {
             post: '',
         };
         bucketPut(authInfo, testRequest, log, err => {
-            assert.deepStrictEqual(err, errors.UnresolvableGrantByEmailAddress);
+            assert.strictEqual(err.is.UnresolvableGrantByEmailAddress, true);
             metadata.getBucket(bucketName, log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 done();
            });
        });

@ -309,7 +308,7 @@ describe('bucketPut API', () => {
     it('should prevent anonymous user from accessing putBucket API', done => {
         const publicAuthInfo = makeAuthInfo(constants.publicId);
         bucketPut(publicAuthInfo, testRequest, log, err => {
-            assert.deepStrictEqual(err, errors.AccessDenied);
+            assert.strictEqual(err.is.AccessDenied, true);
         });
         done();
     });

@ -367,11 +366,10 @@ describe('bucketPut API', () => {
 
     it('should return error if location constraint config is not updated',
         done => bucketPut(authInfo, req, log, err => {
-            const expectedError = errors.InvalidLocationConstraint;
-            expectedError.description = 'value of the location you are ' +
+            assert.strictEqual(err.is.InvalidLocationConstraint, true);
+            assert.strictEqual(err.description, 'value of the location you are ' +
                 `attempting to set - ${newLCKey} - is not listed in the ` +
-                'locationConstraint config';
-            assert.deepStrictEqual(err, expectedError);
+                'locationConstraint config');
             done();
         }));
 
@ -1,5 +1,4 @@
 const assert = require('assert');
-const { errors } = require('arsenal');
 
 const aclUtils = require('../../../lib/utilities/aclUtils');
 const { bucketPut } = require('../../../lib/api/bucketPut');

@ -75,7 +74,7 @@ describe('putBucketACL API', () => {
         };
 
         bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.InvalidArgument);
+            assert.strictEqual(err.is.InvalidArgument, true);
             done();
         });
     });

@ -273,7 +272,7 @@ describe('putBucketACL API', () => {
             query: { acl: '' },
         };
         return bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.InvalidArgument);
+            assert.strictEqual(err.is.InvalidArgument, true);
             done();
         });
     });

@ -295,7 +294,7 @@ describe('putBucketACL API', () => {
         };
 
         bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.UnresolvableGrantByEmailAddress);
+            assert.strictEqual(err.is.UnresolvableGrantByEmailAddress, true);
             done();
         });
     });

@ -421,7 +420,7 @@ describe('putBucketACL API', () => {
         };
 
         bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.MalformedACLError);
+            assert.strictEqual(err.is.MalformedACLError, true);
             done();
         });
     });

@ -464,7 +463,7 @@ describe('putBucketACL API', () => {
         };
 
         bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.MalformedACLError);
+            assert.strictEqual(err.is.MalformedACLError, true);
             done();
         });
     });

@ -496,7 +495,7 @@ describe('putBucketACL API', () => {
         };
 
         return bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.InvalidArgument);
+            assert.strictEqual(err.is.InvalidArgument, true);
             done();
         });
     });

@ -527,7 +526,7 @@ describe('putBucketACL API', () => {
             query: { acl: '' },
         };
         bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.UnresolvableGrantByEmailAddress);
+            assert.strictEqual(err.is.UnresolvableGrantByEmailAddress, true);
             done();
         });
     });

@ -563,7 +562,7 @@ describe('putBucketACL API', () => {
         };
 
         bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.MalformedACLError);
+            assert.strictEqual(err.is.MalformedACLError, true);
             done();
         });
     });

@ -608,7 +607,7 @@ describe('putBucketACL API', () => {
         };
 
         bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.MalformedACLError);
+            assert.strictEqual(err.is.MalformedACLError, true);
             done();
         });
     });

@ -643,7 +642,7 @@ describe('putBucketACL API', () => {
         };
 
         bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.MalformedXML);
+            assert.strictEqual(err.is.MalformedXML, true);
             done();
         });
     });

@ -677,7 +676,7 @@ describe('putBucketACL API', () => {
         };
 
         bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.InvalidArgument);
+            assert.strictEqual(err.is.InvalidArgument, true);
             done();
         });
     });

@ -698,7 +697,7 @@ describe('putBucketACL API', () => {
         };
 
         bucketPutACL(authInfo, testACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.InvalidArgument);
+            assert.strictEqual(err.is.InvalidArgument, true);
             done();
         });
     });
@ -129,7 +129,7 @@ describe('PUT bucket cors :: helper validation functions ', () => {
                     `<ID>${testValue}</ID>`);
                 parseCorsXml(xml, log, err => {
                     assert(err, 'Expected error but found none');
-                    assert.deepStrictEqual(err, errors.MalformedXML);
+                    assert.strictEqual(err.is.MalformedXML, true);
                     return done();
                 });
             });

@ -175,7 +175,7 @@ describe('PUT bucket cors :: helper validation functions ', () => {
                     `<MaxAgeSeconds>${testValue}</MaxAgeSeconds>`);
                 parseCorsXml(xml, log, err => {
                     assert(err, 'Expected error but found none');
-                    assert.deepStrictEqual(err, errors.MalformedXML);
+                    assert.strictEqual(err.is.MalformedXML, true);
                     return done();
                 });
             });
@@ -25,7 +25,7 @@ describe('bucketPutEncryption API', () => {
     describe('test invalid sse configs', () => {
         it('should reject an empty config', done => {
             bucketPutEncryption(authInfo, templateRequest(bucketName, { post: '' }), log, err => {
-                assert.strictEqual(err.MalformedXML, true);
+                assert.strictEqual(err.is.MalformedXML, true);
                 done();
             });
         });
@@ -36,7 +36,7 @@ describe('bucketPutEncryption API', () => {
                 <ServerSideEncryptionConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
                 </ServerSideEncryptionConfiguration>`,
             }), log, err => {
-                assert.strictEqual(err.MalformedXML, true);
+                assert.strictEqual(err.is.MalformedXML, true);
                 done();
             });
         });
@@ -48,7 +48,7 @@ describe('bucketPutEncryption API', () => {
                 <Rule></Rule>
                 </ServerSideEncryptionConfiguration>`,
             }), log, err => {
-                assert.strictEqual(err.MalformedXML, true);
+                assert.strictEqual(err.is.MalformedXML, true);
                 done();
             });
         });
@@ -56,7 +56,7 @@ describe('bucketPutEncryption API', () => {
         it('should reject a config with no SSEAlgorithm', done => {
             const post = templateSSEConfig({});
             bucketPutEncryption(authInfo, templateRequest(bucketName, { post }), log, err => {
-                assert.strictEqual(err.MalformedXML, true);
+                assert.strictEqual(err.is.MalformedXML, true);
                 done();
             });
         });
@@ -64,7 +64,7 @@ describe('bucketPutEncryption API', () => {
         it('should reject a config with an invalid SSEAlgorithm', done => {
             const post = templateSSEConfig({ algorithm: 'InvalidAlgo' });
             bucketPutEncryption(authInfo, templateRequest(bucketName, { post }), log, err => {
-                assert.strictEqual(err.MalformedXML, true);
+                assert.strictEqual(err.is.MalformedXML, true);
                 done();
             });
         });
@@ -72,7 +72,7 @@ describe('bucketPutEncryption API', () => {
         it('should reject a config with SSEAlgorithm == AES256 and a provided KMSMasterKeyID', done => {
             const post = templateSSEConfig({ algorithm: 'AES256', keyId: '12345' });
             bucketPutEncryption(authInfo, templateRequest(bucketName, { post }), log, err => {
-                assert.strictEqual(err.InvalidArgument, true);
+                assert.strictEqual(err.is.InvalidArgument, true);
                 done();
             });
         });
@@ -177,7 +177,7 @@ describe('bucketPutEncryption API', () => {
                     });
                     done();
                 });
-            }
+            },
         );
     });
 });

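The invalid-SSE-config tests above feed bucketPutEncryption a series of malformed ServerSideEncryptionConfiguration bodies produced by templateSSEConfig. A rough, self-contained sketch of such a builder is shown below; the buildSSEConfig name and the exact XML layout are assumptions for illustration, not the repository's helper.

    // Illustrative builder in the spirit of templateSSEConfig: it assembles the
    // ServerSideEncryptionConfiguration body that the tests above expect
    // bucketPutEncryption to reject (MalformedXML / InvalidArgument) or accept.
    function buildSSEConfig({ algorithm, keyId } = {}) {
        const algoXml = algorithm ? `<SSEAlgorithm>${algorithm}</SSEAlgorithm>` : '';
        const keyXml = keyId ? `<KMSMasterKeyID>${keyId}</KMSMasterKeyID>` : '';
        return '<ServerSideEncryptionConfiguration ' +
            'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
            '<Rule><ApplyServerSideEncryptionByDefault>' +
            `${algoXml}${keyXml}` +
            '</ApplyServerSideEncryptionByDefault></Rule>' +
            '</ServerSideEncryptionConfiguration>';
    }

    // Shapes exercised by the tests above:
    //   empty post body                         -> MalformedXML
    //   configuration with no <Rule>            -> MalformedXML
    //   rule with no SSEAlgorithm               -> MalformedXML
    //   SSEAlgorithm AES256 plus KMSMasterKeyID -> InvalidArgument
    console.log(buildSSEConfig({ algorithm: 'AES256', keyId: '12345' }));
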
@@ -48,7 +48,7 @@ describe('putBucketObjectLock API', () => {

     it('should return InvalidBucketState error', done => {
         bucketPutObjectLock(authInfo, putObjLockRequest, log, err => {
-            assert.strictEqual(err.InvalidBucketState, true);
+            assert.strictEqual(err.is.InvalidBucketState, true);
             done();
         });
     });

@@ -70,7 +70,7 @@ describe('putBucketPolicy API', () => {
         expectedBucketPolicy.Statement[0].Resource = 'arn:aws::s3:::badname';
         bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy),
             log, err => {
-                assert.strictEqual(err.MalformedPolicy, true);
+                assert.strictEqual(err.is.MalformedPolicy, true);
                 assert.strictEqual(err.description, 'Policy has invalid resource');
                 return done();
             });
@@ -81,7 +81,7 @@ describe('putBucketPolicy API', () => {
             { StringEquals: { 's3:x-amz-acl': ['public-read'] } };
         bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), log,
             err => {
-                assert.strictEqual(err.NotImplemented, true);
+                assert.strictEqual(err.is.NotImplemented, true);
                 done();
             });
     });
@@ -90,7 +90,7 @@ describe('putBucketPolicy API', () => {
         expectedBucketPolicy.Statement[0].Principal = { Service: ['test.com'] };
         bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), log,
             err => {
-                assert.strictEqual(err.NotImplemented, true);
+                assert.strictEqual(err.is.NotImplemented, true);
                 done();
             });
     });
@@ -100,7 +100,7 @@ describe('putBucketPolicy API', () => {
             { Federated: 'www.test.com' };
         bucketPutPolicy(authInfo, getPolicyRequest(expectedBucketPolicy), log,
             err => {
-                assert.strictEqual(err.NotImplemented, true);
+                assert.strictEqual(err.is.NotImplemented, true);
                 done();
             });
     });

@@ -13,7 +13,7 @@ function checkError(xml, expectedErr, cb) {
         if (expectedErr === null) {
             assert.strictEqual(err, null, `expected no error but got '${err}'`);
         } else {
-            assert(err[expectedErr], 'incorrect error response: should be ' +
+            assert(err.is[expectedErr], 'incorrect error response: should be ' +
                 `'Error: ${expectedErr}' but got '${err}'`);
         }
         return cb();

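The checkError helper above centralises the error assertion for an XML validation suite; only its else branch changes, switching from err[expectedErr] to err.is[expectedErr]. A self-contained sketch of the overall pattern is given below, with a stubbed parser standing in for the real helper under test; fakeParseXml and the error literal it returns are assumptions, not code from the repository.

    const assert = require('assert');

    // Stubbed parser: returns an Arsenal-style error (with an `is` map) for an
    // empty body, and no error otherwise.
    function fakeParseXml(xml, log, cb) {
        if (!xml) {
            return cb({ description: 'empty body', is: { MalformedXML: true } });
        }
        return cb(null);
    }

    function checkError(xml, expectedErr, cb) {
        fakeParseXml(xml, { /* log stub */ }, err => {
            if (expectedErr === null) {
                assert.strictEqual(err, null, `expected no error but got '${err}'`);
            } else {
                assert(err.is[expectedErr], 'incorrect error response: should be ' +
                    `'Error: ${expectedErr}' but got '${err}'`);
            }
            return cb();
        });
    }

    checkError('', 'MalformedXML', () => {});
    checkError('<WebsiteConfiguration/>', null, () => {});
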
@@ -1,6 +1,5 @@
 const crypto = require('crypto');
 const assert = require('assert');
-const { errors } = require('arsenal');

 const BucketInfo = require('arsenal').models.BucketInfo;
 const bucketGet = require('../../../lib/api/bucketGet');

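Because every assertion in this test file now reads err.is.<Type> directly off the error handed back by the API under test, the shared errors object from arsenal is no longer referenced, which is why the hunk above drops the import. A minimal before/after illustration follows; the respondWithError stand-in is hypothetical.

    const assert = require('assert');

    // Hypothetical stand-in for an API handler that fails with an
    // Arsenal-style error carrying an `is` map.
    function respondWithError(cb) {
        return cb({ code: 404, is: { NoSuchBucket: true } });
    }

    // Before: const { errors } = require('arsenal');
    //         assert.deepStrictEqual(err, errors.NoSuchBucket);
    // After: no arsenal import is needed in the test file.
    respondWithError(err => {
        assert.strictEqual(err.is.NoSuchBucket, true);
    });
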
@@ -96,7 +95,7 @@ function confirmDeleted(done) {
     process.nextTick(() => {
         process.nextTick(() => {
             metadata.getBucket(bucketName, log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 return checkBucketListing(authInfo, bucketName, 0, done);
             });
         });
@@ -138,7 +137,7 @@ describe('deleted flag bucket handling', () => {
         'different account sends put bucket request for bucket with ' +
         'deleted flag', done => {
         bucketPut(otherAccountAuthInfo, baseTestRequest, log, err => {
-            assert.deepStrictEqual(err, errors.BucketAlreadyExists);
+            assert.strictEqual(err.is.BucketAlreadyExists, true);
             metadata.getBucket(bucketName, log, (err, data) => {
                 assert.strictEqual(data._transient, false);
                 assert.strictEqual(data._deleted, true);
@@ -193,7 +192,7 @@ describe('deleted flag bucket handling', () => {
             'x-amz-acl': 'public-read' }, 'headers',
             baseTestRequest, baseTestRequest.headers);
         bucketPutACL(otherAccountAuthInfo, putACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.NoSuchBucket);
+            assert.strictEqual(err.is.NoSuchBucket, true);
             metadata.getBucket(bucketName, log, (err, data) => {
                 assert.strictEqual(data._deleted, true);
                 assert.strictEqual(data._transient, false);
@@ -212,7 +211,7 @@ describe('deleted flag bucket handling', () => {
             baseTestRequest, baseTestRequest.headers);
         const unauthorizedAccount = makeAuthInfo('keepMeOut');
         bucketPutACL(unauthorizedAccount, putACLRequest, log, err => {
-            assert.deepStrictEqual(err, errors.AccessDenied);
+            assert.strictEqual(err.is.AccessDenied, true);
             metadata.getBucket(bucketName, log, (err, data) => {
                 assert.strictEqual(data._deleted, true);
                 assert.strictEqual(data._transient, false);
@@ -266,7 +265,7 @@ describe('deleted flag bucket handling', () => {
         const postBody = Buffer.from('I am a body', 'utf8');
         const putObjRequest = new DummyRequest(setUpRequest, postBody);
         objectPut(otherAccountAuthInfo, putObjRequest, undefined, log, err => {
-            assert.deepStrictEqual(err, errors.NoSuchBucket);
+            assert.strictEqual(err.is.NoSuchBucket, true);
             done();
         });
     });
@@ -314,7 +313,7 @@ describe('deleted flag bucket handling', () => {
         initiateRequest.objectKey = 'objectName';
         initiateMultipartUpload(otherAccountAuthInfo, initiateRequest, log,
             err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 done();
             });
     });
@@ -331,7 +330,7 @@ describe('deleted flag bucket handling', () => {
         'authorized', done => {
         bucketDelete(otherAccountAuthInfo, baseTestRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.AccessDenied);
+                assert.strictEqual(err.is.AccessDenied, true);
                 done();
             });
     });
@@ -340,7 +339,7 @@ describe('deleted flag bucket handling', () => {
         'NoSuchBucket error and complete deletion', done => {
         bucketDeleteWebsite(authInfo, baseTestRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });
@@ -349,7 +348,7 @@ describe('deleted flag bucket handling', () => {
         'NoSuchBucket error and complete deletion', done => {
         bucketGet(authInfo, baseTestRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });
@@ -358,7 +357,7 @@ describe('deleted flag bucket handling', () => {
         'NoSuchBucket error and complete deletion', done => {
         bucketGetACL(authInfo, baseTestRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });
@@ -367,7 +366,7 @@ describe('deleted flag bucket handling', () => {
         'NoSuchBucket error and complete deletion', done => {
         bucketGetCors(authInfo, baseTestRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });
@@ -383,7 +382,7 @@ describe('deleted flag bucket handling', () => {
         bucketPutCorsRequest.headers['content-md5'] = crypto.createHash('md5')
             .update(bucketPutCorsRequest.post, 'utf8').digest('base64');
         bucketPutCors(authInfo, bucketPutCorsRequest, log, err => {
-            assert.deepStrictEqual(err, errors.NoSuchBucket);
+            assert.strictEqual(err.is.NoSuchBucket, true);
             confirmDeleted(done);
         });
     });
@@ -391,7 +390,7 @@ describe('deleted flag bucket handling', () => {
     it('bucketDeleteCors request on bucket with delete flag should return ' +
         'NoSuchBucket error and complete deletion', done => {
         bucketDeleteCors(authInfo, baseTestRequest, log, err => {
-            assert.deepStrictEqual(err, errors.NoSuchBucket);
+            assert.strictEqual(err.is.NoSuchBucket, true);
             confirmDeleted(done);
         });
     });
@@ -400,7 +399,7 @@ describe('deleted flag bucket handling', () => {
         'NoSuchBucket error and complete deletion', done => {
         bucketGetWebsite(authInfo, baseTestRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });
@@ -414,7 +413,7 @@ describe('deleted flag bucket handling', () => {
         '</WebsiteConfiguration>';
         bucketPutWebsite(authInfo, bucketPutWebsiteRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });
@@ -423,7 +422,7 @@ describe('deleted flag bucket handling', () => {
         'NoSuchBucket error and complete deletion', done => {
         bucketHead(authInfo, baseTestRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });
@@ -438,13 +437,13 @@ describe('deleted flag bucket handling', () => {
         if (extraArgNeeded) {
             return apiAction(authInfo, mpuRequest, undefined,
                 log, err => {
-                    assert.deepStrictEqual(err, errors.NoSuchUpload);
+                    assert.strictEqual(err.is.NoSuchUpload, true);
                     return done();
                 });
         }
         return apiAction(authInfo, mpuRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchUpload);
+                assert.strictEqual(err.is.NoSuchUpload, true);
                 return done();
             });
     }
@@ -495,7 +494,7 @@ describe('deleted flag bucket handling', () => {
         listRequest.query = {};
         listMultipartUploads(authInfo, listRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 done();
             });
     });
@@ -505,7 +504,7 @@ describe('deleted flag bucket handling', () => {
         done => {
         objectGet(authInfo, baseTestRequest, false,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });
@@ -514,7 +513,7 @@ describe('deleted flag bucket handling', () => {
         'NoSuchBucket error and complete deletion', done => {
         objectGetACL(authInfo, baseTestRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });
@@ -523,7 +522,7 @@ describe('deleted flag bucket handling', () => {
         'NoSuchBucket error and complete deletion', done => {
         objectHead(authInfo, baseTestRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });
@@ -532,7 +531,7 @@ describe('deleted flag bucket handling', () => {
         'NoSuchBucket error and complete deletion', done => {
         objectPutACL(authInfo, baseTestRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });
@@ -541,7 +540,7 @@ describe('deleted flag bucket handling', () => {
         'NoSuchBucket error', done => {
         objectDelete(authInfo, baseTestRequest,
             log, err => {
-                assert.deepStrictEqual(err, errors.NoSuchBucket);
+                assert.strictEqual(err.is.NoSuchBucket, true);
                 confirmDeleted(done);
             });
     });

Some files were not shown because too many files have changed in this diff.