Compare commits

...

11 Commits

Author SHA1 Message Date
Taylor McKinnon b226ecf597 bf(CLDSRV-232): Prevent empty NextContinuationToken from being sent at listing end
(cherry picked from commit 4069a94f78)
2022-07-06 11:24:01 -07:00
Ronnie Smith fbcd1dd327 feature: CLDSRV-162 update bad version ids to be proper
(cherry picked from commit 5a6b01c4d5)
2022-05-11 13:39:40 -07:00
Jonathan Gramain a1c4420eab [7.10] CLDSRV-177 add missing test helper checkObjectData
(cherry picked from commit 413ebe743c)
2022-04-20 12:05:43 -07:00
Jonathan Gramain 108d1c920f bugfix: CLDSRV-177 fix crash with empty object replication
Fix a crash occurring when an empty object is replicated over a
non-empty object.

It is not clear how this happens in practice, but there are corner
cases, such as a race between object replication and versioning being
suspended on the target bucket at the same time, that could lead to
this situation, since the check between the replication configuration
and the actual replication is not atomic.

(cherry picked from commit a4e8cbebe6)
2022-04-20 12:05:19 -07:00
Nicolas Humbert d30430a81c CLDSRV-173 DeleteMarkers created by Lifecycle should not be replicated 2022-04-15 09:11:47 -04:00
bert-e ec0dab4168 Merge branch 'bugfix/S3C-5390-s3api_head-object_with_part-number_1_on_empty_file_fails-hotfix-hotfix-7.10.3' into q/7.10.3.1 2022-04-12 18:47:12 +00:00
Artem Bakalov af95fea311 remove .only
(cherry picked from commit b4725aa032)
2022-04-12 11:05:07 -07:00
Artem Bakalov 8f3e737664 S3C-5390 s3api head-object with part-number 1 on empty file fails: httpCode 416
(cherry picked from commit 4f3195a6ca)
2022-04-12 11:05:07 -07:00
bert-e 24c88e90b0 Merge branch 'improvement/CLDSRV-171-403-when-no-region-fix' into q/7.10.3.1 2022-04-12 18:01:46 +00:00
Jonathan Gramain c2dbbfa008 bugfix: CLDSRV-170 skip orphan cleanup in UploadPart[Copy]
Do not delete orphan data in UploadPart/UploadPartCopy on overwrite
iff a CompleteMPU of the target MPU is already in progress.

This is to prevent a race condition where a CompleteMPU is running
while UploadPart is uploading a part for the same MPU.

It leaves an orphan in storage, since only one of the uploaded copies
of the part data will be present in the finished MPU, but the window
is limited to the CompleteMPU execution and should only occur when
UploadPart is retried due to prior stuck requests, or with broken
clients misusing the MPU API, so it should be acceptable.

Implementation details:

- set a flag in the MPU overview key when starting the CompleteMPU
  process, before listing the parts from metadata to construct the
  manifest

- in UploadPart/UploadPartCopy, after the part metadata is written and
  if the same part already existed, re-fetch the MPU overview key to
  check the flag: if set, skip the deletion of the old data of this
  part, since the CompleteMPU process in progress may choose either
  part data depending on the exact timing of the listing vs. the
  part overwrite.

(cherry picked from commit 8496111518)
2022-04-11 19:12:31 -07:00
Naren bbd791cd25 improvement: CLDSRV-171 upgrade vaultclient 2022-04-11 17:45:37 -07:00
29 changed files with 901 additions and 179 deletions

View File

@@ -174,6 +174,10 @@ const constants = {
'bucket',
],
allowedUtapiEventFilterStates: ['allow', 'deny'],
// The AWS assumed Role resource type
assumedRoleArnResourceType: 'assumed-role',
// Session name of the backbeat lifecycle assumed role session.
backbeatLifecycleSessionName: 'backbeat-lifecycle',
};
module.exports = constants;

View File

@@ -1,7 +1,8 @@
const { evaluators, actionMaps, RequestContext } = require('arsenal').policies;
const constants = require('../../../../constants');
const { allAuthedUsersId, bucketOwnerActions, logId, publicId } = constants;
const { allAuthedUsersId, bucketOwnerActions, logId, publicId,
assumedRoleArnResourceType, backbeatLifecycleSessionName } = constants;
// whitelist buckets to allow public read on objects
const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ?
@@ -364,10 +365,34 @@ function validatePolicyResource(bucketName, policy) {
});
}
/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
* @param {string} arn - Amazon resource name - example:
* arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
* @return {boolean} true if Lifecycle assumed role session arn, false if not.
*/
function isLifecycleSession(arn) {
if (!arn) {
return false;
}
const arnSplits = arn.split(':');
const service = arnSplits[2];
const resourceNames = arnSplits[arnSplits.length - 1].split('/');
const resourceType = resourceNames[0];
const sessionName = resourceNames[resourceNames.length - 1];
return (service === 'sts' &&
resourceType === assumedRoleArnResourceType &&
sessionName === backbeatLifecycleSessionName);
}
module.exports = {
isBucketAuthorized,
isObjAuthorized,
checkBucketAcls,
checkObjectAcls,
validatePolicyResource,
isLifecycleSession,
};
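
To make the new check concrete, here is a minimal usage sketch of isLifecycleSession; the require path is the one used by the unit test added further down in this compare, and the ARNs are the same values exercised there:

const { isLifecycleSession } =
    require('../../../../lib/api/apiUtils/authorization/permissionChecks.js');

// Session ARN produced when the Backbeat lifecycle service assumes its role:
isLifecycleSession('arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle'); // true

// Any other principal is not treated as a lifecycle session:
isLifecycleSession('arn:aws:iam::257038443293:user/bart');                           // false
isLifecycleSession('arn:aws:sts::257038443293:assumed-role/rolename/other-service'); // false
isLifecycleSession('');                                                              // false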

View File

@@ -136,9 +136,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
size,
headers,
isDeleteMarker,
replicationInfo: getReplicationInfo(objectKey, bucketMD, false, size),
replicationInfo: getReplicationInfo(objectKey, bucketMD, false, size, null, null, authInfo, isDeleteMarker),
log,
};
if (!isDeleteMarker) {
metadataStoreParams.contentType = request.headers['content-type'];
metadataStoreParams.cacheControl = request.headers['cache-control'];

View File

@@ -1,4 +1,5 @@
const s3config = require('../../../Config').config;
const { isLifecycleSession } = require('../authorization/permissionChecks.js');
function _getBackend(objectMD, site) {
const backends = objectMD ? objectMD.replicationInfo.backends : [];
@@ -63,14 +64,22 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
* @param {boolean} objSize - The size, in bytes, of the object being PUT
* @param {string} operationType - The type of operation to replicate
* @param {object} objectMD - The object metadata
* @param {AuthInfo} [authInfo] - authentication info of object owner
* @param {boolean} [isDeleteMarker] - whether creating a delete marker
* @return {undefined}
*/
function getReplicationInfo(objKey, bucketMD, isMD, objSize, operationType,
objectMD) {
objectMD, authInfo, isDeleteMarker) {
const content = isMD || objSize === 0 ? ['METADATA'] : ['DATA', 'METADATA'];
const config = bucketMD.getReplicationConfiguration();
// If bucket does not have a replication configuration, do not replicate.
if (config) {
// If an object is deleted due to a lifecycle action,
// the delete marker is not replicated to the destination buckets.
if (isDeleteMarker && authInfo && isLifecycleSession(authInfo.getArn())) {
return undefined;
}
const rule = config.rules.find(rule =>
(objKey.startsWith(rule.prefix) && rule.enabled));
if (rule) {
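
For context, this is the call shape createAndStoreObject now uses (shown in an earlier hunk) and the effect of the new guard; variable names are those of the caller and this is only an illustrative sketch:

const replicationInfo = getReplicationInfo(
    objectKey, bucketMD,
    false,              // isMD
    size,
    null, null,         // operationType, objectMD
    authInfo,           // requester identity, possibly an assumed-role session
    isDeleteMarker);
// When isDeleteMarker is true and authInfo.getArn() matches the
// backbeat-lifecycle assumed-role session, replicationInfo is undefined,
// so the lifecycle-created delete marker is never queued for replication.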

View File

@@ -8,12 +8,13 @@
*
* @param {array|string|null} prev - list of keys from the object being
* overwritten
* @param {array} curr - list of keys to be used in composing current object
* @param {array|null} curr - list of keys to be used in composing
* current object
* @returns {boolean} true if no key in `curr` is present in `prev`,
* false otherwise
*/
function locationKeysHaveChanged(prev, curr) {
if (!prev || prev.length === 0) {
if (!prev || prev.length === 0 || !curr) {
return true;
}
// backwards compatibility check if object is of model version 2
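
A quick behaviour sketch of locationKeysHaveChanged after this change; the null-curr case mirrors the unit tests added later in this compare:

locationKeysHaveChanged([{ key: 'aaa' }], [{ key: 'bbb' }]); // true: no old key is reused
locationKeysHaveChanged(null, [{ key: 'bbb' }]);             // true: nothing was overwritten
locationKeysHaveChanged([{ key: 'aaa' }], null);             // true: overwrite with an empty object (the fix)
locationKeysHaveChanged([{ key: 'aaa' }], [{ key: 'aaa' }]); // false: the old location is still referenced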

View File

@@ -218,7 +218,8 @@ function processMasterVersions(bucketName, listParams, list) {
} else if (p.tag !== 'NextMarker' &&
p.tag !== 'EncodingType' &&
p.tag !== 'Delimiter' &&
p.tag !== 'StartAfter') {
p.tag !== 'StartAfter' &&
p.tag !== 'NextContinuationToken') {
xml.push(`<${p.tag}/>`);
}
});

View File

@@ -158,6 +158,22 @@ function completeMultipartUpload(authInfo, request, log, callback) {
}
return next(errors.MalformedXML, destBucket);
},
function markOverviewForCompletion(destBucket, objMD, mpuBucket, jsonList,
storedMetadata, location, mpuOverviewKey, next) {
return services.metadataMarkMPObjectForCompletion({
bucketName: mpuBucket.getName(),
objectKey,
uploadId,
splitter,
storedMetadata,
}, log, err => {
if (err) {
return next(err);
}
return next(null, destBucket, objMD, mpuBucket,
jsonList, storedMetadata, location, mpuOverviewKey);
});
},
function retrieveParts(destBucket, objMD, mpuBucket, jsonList,
storedMetadata, location, mpuOverviewKey, next) {
return services.getMPUparts(mpuBucket.getName(), uploadId, log,

View File

@@ -130,10 +130,12 @@ function objectHead(authInfo, request, log, callback) {
return callback(errors.BadRequest, corsHeaders);
}
const partSize = getPartSize(objMD, partNumber);
if (!partSize) {
const isEmptyObject = objLength === 0;
if (!partSize && !isEmptyObject) {
return callback(errors.InvalidRange, corsHeaders);
}
responseHeaders['content-length'] = partSize;
responseHeaders['content-length'] = isEmptyObject ? 0 : partSize;
const partsCount = getPartCountFromMd5(objMD);
if (partsCount) {
responseHeaders['x-amz-mp-parts-count'] = partsCount;
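
From a client's perspective, the fix changes the HEAD response for part number 1 of an empty object; a short sketch using the aws-sdk v2 client as the functional tests do (bucket and key names are illustrative):

s3.headObject({
    Bucket: 'mpu-test-bucket',
    Key: 'mpu-test-object',
    PartNumber: 1,
}, (err, data) => {
    // before the fix: err is an InvalidRange (HTTP 416) even though the object exists
    // after the fix:  err is null and data.ContentLength === 0 for an empty object
});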

View File

@@ -232,12 +232,12 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
res.controllingLocationConstraint;
return next(null, dataLocator, destBucketMD,
destObjLocationConstraint, copyObjectSize,
sourceVerId, sourceLocationConstraintName);
sourceVerId, sourceLocationConstraintName, splitter);
});
},
function goGetData(dataLocator, destBucketMD,
destObjLocationConstraint, copyObjectSize, sourceVerId,
sourceLocationConstraintName, next) {
sourceLocationConstraintName, splitter, next) {
data.uploadPartCopy(request, log, destBucketMD,
sourceLocationConstraintName,
destObjLocationConstraint, dataLocator, dataStoreContext,
@@ -246,18 +246,18 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
if (error.message === 'skip') {
return next(skipError, destBucketMD, eTag,
lastModified, sourceVerId,
serverSideEncryption);
serverSideEncryption, lastModified, splitter);
}
return next(error, destBucketMD);
}
return next(null, destBucketMD, locations, eTag,
copyObjectSize, sourceVerId, serverSideEncryption,
lastModified);
lastModified, splitter);
});
},
function getExistingPartInfo(destBucketMD, locations, totalHash,
copyObjectSize, sourceVerId, serverSideEncryption, lastModified,
next) {
splitter, next) {
const partKey =
`${uploadId}${constants.splitter}${paddedPartNumber}`;
metadata.getObjectMD(mpuBucketName, partKey, {}, log,
@@ -281,12 +281,12 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
}
return next(null, destBucketMD, locations, totalHash,
prevObjectSize, copyObjectSize, sourceVerId,
serverSideEncryption, lastModified, oldLocations);
serverSideEncryption, lastModified, oldLocations, splitter);
});
},
function storeNewPartMetadata(destBucketMD, locations, totalHash,
prevObjectSize, copyObjectSize, sourceVerId, serverSideEncryption,
lastModified, oldLocations, next) {
lastModified, oldLocations, splitter, next) {
const metaStoreParams = {
partNumber: paddedPartNumber,
contentMD5: totalHash,
@@ -302,20 +302,58 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
{ error: err, method: 'storeNewPartMetadata' });
return next(err);
}
return next(null, oldLocations, destBucketMD, totalHash,
return next(null, locations, oldLocations, destBucketMD, totalHash,
lastModified, sourceVerId, serverSideEncryption,
prevObjectSize, copyObjectSize);
prevObjectSize, copyObjectSize, splitter);
});
},
function cleanupExistingData(oldLocations, destBucketMD, totalHash,
function checkCanDeleteOldLocations(partLocations, oldLocations, destBucketMD,
totalHash, lastModified, sourceVerId, serverSideEncryption,
prevObjectSize, copyObjectSize, splitter, next) {
if (!oldLocations) {
return next(null, oldLocations, destBucketMD, totalHash,
lastModified, sourceVerId, serverSideEncryption,
prevObjectSize, copyObjectSize);
}
return services.isCompleteMPUInProgress({
bucketName: destBucketName,
objectKey: destObjectKey,
uploadId,
splitter,
}, log, (err, completeInProgress) => {
if (err) {
return next(err, destBucketMD);
}
let oldLocationsToDelete = oldLocations;
// Prevent deletion of old data if a completeMPU
// is already in progress because then there is no
// guarantee that the old location will not be the
// committed one.
if (completeInProgress) {
log.warn('not deleting old locations because CompleteMPU is in progress', {
method: 'objectPutCopyPart::checkCanDeleteOldLocations',
bucketName: destBucketName,
objectKey: destObjectKey,
uploadId,
partLocations,
oldLocations,
});
oldLocationsToDelete = null;
}
return next(null, oldLocationsToDelete, destBucketMD, totalHash,
lastModified, sourceVerId, serverSideEncryption,
prevObjectSize, copyObjectSize);
});
},
function cleanupExistingData(oldLocationsToDelete, destBucketMD, totalHash,
lastModified, sourceVerId, serverSideEncryption,
prevObjectSize, copyObjectSize, next) {
// Clean up the old data now that new metadata (with new
// data locations) has been stored
if (oldLocations) {
if (oldLocationsToDelete) {
const delLog = logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids());
return data.batchDelete(oldLocations, request.method, null,
return data.batchDelete(oldLocationsToDelete, request.method, null,
delLog, err => {
if (err) {
// if error, log the error and move on as it is not

View File

@@ -13,6 +13,7 @@ const kms = require('../kms/wrapper');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const logger = require('../utilities/logger');
const services = require('../services');
const { config } = require('../Config');
const multipleBackendGateway = require('../data/multipleBackendGateway');
const locationConstraintCheck
@@ -272,19 +273,19 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
}
return next(null, destinationBucket,
objectLocationConstraint, cipherBundle,
partKey, prevObjectSize, oldLocations, partInfo);
partKey, prevObjectSize, oldLocations, partInfo, splitter);
});
},
// Store in data backend.
(destinationBucket, objectLocationConstraint, cipherBundle,
partKey, prevObjectSize, oldLocations, partInfo, next) => {
partKey, prevObjectSize, oldLocations, partInfo, splitter, next) => {
// NOTE: set oldLocations to null so we do not batchDelete for now
if (partInfo && partInfo.dataStoreType === 'azure') {
// skip to storing metadata
return next(null, destinationBucket, partInfo,
partInfo.dataStoreETag,
cipherBundle, partKey, prevObjectSize, null,
objectLocationConstraint);
objectLocationConstraint, splitter);
}
const objectContext = {
bucketName,
@@ -304,12 +305,13 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
}
return next(null, destinationBucket, dataGetInfo, hexDigest,
cipherBundle, partKey, prevObjectSize, oldLocations,
objectLocationConstraint);
objectLocationConstraint, splitter);
});
},
// Store data locations in metadata and delete any overwritten data.
// Store data locations in metadata and delete any overwritten
// data if completeMPU hasn't been initiated yet.
(destinationBucket, dataGetInfo, hexDigest, cipherBundle, partKey,
prevObjectSize, oldLocations, objectLocationConstraint, next) => {
prevObjectSize, oldLocations, objectLocationConstraint, splitter, next) => {
// Use an array to be consistent with objectPutCopyPart where there
// could be multiple locations.
const partLocations = [dataGetInfo];
@@ -339,19 +341,54 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
});
return next(err, destinationBucket);
}
return next(null, oldLocations, objectLocationConstraint,
destinationBucket, hexDigest, prevObjectSize);
return next(null, partLocations, oldLocations, objectLocationConstraint,
destinationBucket, hexDigest, prevObjectSize, splitter);
});
},
(partLocations, oldLocations, objectLocationConstraint, destinationBucket,
hexDigest, prevObjectSize, splitter, next) => {
if (!oldLocations) {
return next(null, oldLocations, objectLocationConstraint,
destinationBucket, hexDigest, prevObjectSize);
}
return services.isCompleteMPUInProgress({
bucketName,
objectKey,
uploadId,
splitter,
}, log, (err, completeInProgress) => {
if (err) {
return next(err, destinationBucket);
}
let oldLocationsToDelete = oldLocations;
// Prevent deletion of old data if a completeMPU
// is already in progress because then there is no
// guarantee that the old location will not be the
// committed one.
if (completeInProgress) {
log.warn('not deleting old locations because CompleteMPU is in progress', {
method: 'objectPutPart::metadata.getObjectMD',
bucketName,
objectKey,
uploadId,
partLocations,
oldLocations,
});
oldLocationsToDelete = null;
}
return next(null, oldLocationsToDelete, objectLocationConstraint,
destinationBucket, hexDigest, prevObjectSize);
});
},
// Clean up any old data now that new metadata (with new
// data locations) has been stored.
(oldLocations, objectLocationConstraint, destinationBucket, hexDigest,
(oldLocationsToDelete, objectLocationConstraint, destinationBucket, hexDigest,
prevObjectSize, next) => {
if (oldLocations) {
if (oldLocationsToDelete) {
log.trace('overwriting mpu part, deleting data');
const delLog = logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids());
return data.batchDelete(oldLocations, request.method,
return data.batchDelete(oldLocationsToDelete, request.method,
objectLocationConstraint, delLog, err => {
if (err) {
// if error, log the error and move on as it is not

View File

@@ -458,6 +458,80 @@ const services = {
});
},
/**
* Mark the MPU overview key with a flag when starting the
* CompleteMPU operation, to be checked by "put part" operations
*
* @param {object} params - params object
* @param {string} params.bucketName - name of MPU bucket
* @param {string} params.objectKey - object key
* @param {string} params.uploadId - upload ID
* @param {string} params.splitter - splitter for this overview key
* @param {object} params.storedMetadata - original metadata of the overview key
* @param {Logger} log - Logger object
* @param {function} cb - callback(err)
* @return {undefined}
*/
metadataMarkMPObjectForCompletion(params, log, cb) {
assert.strictEqual(typeof params, 'object');
assert.strictEqual(typeof params.bucketName, 'string');
assert.strictEqual(typeof params.objectKey, 'string');
assert.strictEqual(typeof params.uploadId, 'string');
assert.strictEqual(typeof params.splitter, 'string');
assert.strictEqual(typeof params.storedMetadata, 'object');
const splitter = params.splitter;
const longMPUIdentifier =
`overview${splitter}${params.objectKey}${splitter}${params.uploadId}`;
const multipartObjectMD = Object.assign({}, params.storedMetadata);
multipartObjectMD.completeInProgress = true;
metadata.putObjectMD(params.bucketName, longMPUIdentifier, multipartObjectMD,
{}, log, err => {
if (err) {
log.error('error from metadata', { error: err });
return cb(err);
}
return cb();
});
},
/**
* Returns whether a CompleteMPU operation is in progress for this
* object, by looking at the `completeInProgress` flag stored in
* the overview key
*
* @param {object} params - params object
* @param {string} params.bucketName - bucket name where object should be stored
* @param {string} params.objectKey - object key
* @param {string} params.uploadId - upload ID
* @param {string} params.splitter - splitter for this overview key
* @param {object} log - request logger instance
* @param {function} cb - callback(err, {bool} completeInProgress)
* @return {undefined}
*/
isCompleteMPUInProgress(params, log, cb) {
assert.strictEqual(typeof params, 'object');
assert.strictEqual(typeof params.bucketName, 'string');
assert.strictEqual(typeof params.objectKey, 'string');
assert.strictEqual(typeof params.uploadId, 'string');
assert.strictEqual(typeof params.splitter, 'string');
const mpuBucketName = `${constants.mpuBucketPrefix}${params.bucketName}`;
const splitter = params.splitter;
const mpuOverviewKey =
`overview${splitter}${params.objectKey}${splitter}${params.uploadId}`;
return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log,
(err, res) => {
if (err) {
log.error('error getting the overview object from mpu bucket', {
error: err,
method: 'services.isCompleteMPUInProgress',
params,
});
return cb(err);
}
return cb(null, Boolean(res.completeInProgress));
});
},
/**
* Checks whether bucket exists, multipart upload
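
Taken together, the two helpers above implement the guard described in the CLDSRV-170 commit message; here is a simplified sketch of how this changeset uses them, with the surrounding waterfall plumbing omitted:

// completeMultipartUpload: mark the overview key before listing parts
services.metadataMarkMPObjectForCompletion({
    bucketName: mpuBucket.getName(),
    objectKey,
    uploadId,
    splitter,
    storedMetadata,
}, log, err => {
    // once the flag is stored, listing and assembling the parts can start
});

// objectPutPart / objectPutCopyPart: check the flag before deleting the
// data locations of an overwritten part
services.isCompleteMPUInProgress({
    bucketName,
    objectKey,
    uploadId,
    splitter,
}, log, (err, completeInProgress) => {
    const oldLocationsToDelete = completeInProgress ? null : oldLocations;
    // null means: keep the old data as an orphan instead of risking the
    // deletion of locations the in-progress CompleteMPU may commit
});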

View File

@@ -38,7 +38,7 @@
"utapi": "scality/utapi#7.10.6",
"utf8": "~2.1.1",
"uuid": "^3.0.1",
"vaultclient": "scality/vaultclient#7.10.2",
"vaultclient": "scality/vaultclient#7.10.6",
"werelogs": "scality/werelogs#0a4c576",
"xml2js": "~0.4.16"
},

View File

@@ -210,5 +210,39 @@ describe('Complete MPU', () => {
});
});
});
describe('with re-upload of part during CompleteMPU execution', () => {
let uploadId;
let eTag;
beforeEach(() => _initiateMpuAndPutOnePart()
.then(result => {
uploadId = result.uploadId;
eTag = result.eTag;
})
);
it('should complete the MPU successfully and leave a readable object', done => {
async.parallel([
doneReUpload => s3.uploadPart({
Bucket: bucket,
Key: key,
PartNumber: 1,
UploadId: uploadId,
Body: 'foo',
}, err => {
// in case the CompleteMPU finished earlier,
// we may get a NoSuchKey error, so just
// ignore it
if (err && err.code === 'NoSuchKey') {
return doneReUpload();
}
return doneReUpload(err);
}),
doneComplete => _completeMpuAndCheckVid(
uploadId, eTag, undefined, doneComplete),
], done);
});
});
});
});

View File

@@ -577,6 +577,72 @@ describe('Object Part Copy', () => {
checkNoError(err);
});
});
it('should not corrupt object if overwriting an existing part by copying a part ' +
'while the MPU is being completed', () => {
// AWS response etag for this completed MPU
const finalObjETag = '"db77ebbae9e9f5a244a26b86193ad818-1"';
process.stdout.write('Putting first part in MPU test');
return s3.uploadPartCopy({ Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
PartNumber: 1,
UploadId: uploadId,
}).promise().then(res => {
assert.strictEqual(res.ETag, etag);
assert(res.LastModified);
}).then(() => {
process.stdout.write('Overwriting first part in MPU test and completing MPU ' +
'at the same time');
return Promise.all([
s3.uploadPartCopy({
Bucket: destBucketName,
Key: destObjName,
CopySource: `${sourceBucketName}/${sourceObjName}`,
PartNumber: 1,
UploadId: uploadId,
}).promise().catch(err => {
// in case the CompleteMPU finished
// earlier, we may get a NoSuchKey error,
// so just ignore it and resolve with a
// special value, otherwise re-throw the
// error
if (err && err.code === 'NoSuchKey') {
return Promise.resolve(null);
}
throw err;
}),
s3.completeMultipartUpload({
Bucket: destBucketName,
Key: destObjName,
UploadId: uploadId,
MultipartUpload: {
Parts: [
{ ETag: etag, PartNumber: 1 },
],
},
}).promise(),
]);
}).then(([uploadRes, completeRes]) => {
// if upload succeeded before CompleteMPU finished
if (uploadRes !== null) {
assert.strictEqual(uploadRes.ETag, etag);
assert(uploadRes.LastModified);
}
assert.strictEqual(completeRes.Bucket, destBucketName);
assert.strictEqual(completeRes.Key, destObjName);
assert.strictEqual(completeRes.ETag, finalObjETag);
}).then(() => {
process.stdout.write('Getting object put by MPU with ' +
'overwrite part');
return s3.getObject({
Bucket: destBucketName,
Key: destObjName,
}).promise();
}).then(res => {
assert.strictEqual(res.ETag, finalObjETag);
});
});
});
it('should return an error if no such upload initiated',

View File

@@ -86,7 +86,7 @@ describe('GET object legal hold', () => {
s3.getObjectLegalHold({
Bucket: bucket,
Key: key,
VersionId: '000000000000',
VersionId: '012345678901234567890123456789012',
}, err => {
checkError(err, 'NoSuchVersion', 404);
done();

View File

@@ -3,18 +3,7 @@ const async = require('async');
const withV4 = require('../support/withV4');
const BucketUtility = require('../../lib/utility/bucket-util');
const { maximumAllowedPartCount } = require('../../../../../constants');
const bucket = 'mpu-test-bucket';
const object = 'mpu-test-object';
const bodySize = 1024 * 1024 * 5;
const bodyContent = 'a';
const howManyParts = 3;
const partNumbers = Array.from(Array(howManyParts).keys());
const invalidPartNumbers = [-1, 0, maximumAllowedPartCount + 1];
let ETags = [];
const objectConfigs = require('../support/objectConfigs');
function checkError(err, statusCode, code) {
assert.strictEqual(err.statusCode, statusCode);
@@ -26,128 +15,154 @@ function checkNoError(err) {
`Expected success, got error ${JSON.stringify(err)}`);
}
function generateContent(partNumber) {
return Buffer.alloc(bodySize + partNumber, bodyContent);
function generateContent(size, bodyContent) {
return Buffer.alloc(size, bodyContent);
}
describe('Part size tests with object head', () => {
withV4(sigCfg => {
let bucketUtil;
let s3;
objectConfigs.forEach(config => {
describe(config.signature, () => {
let ETags = [];
function headObject(fields, cb) {
s3.headObject(Object.assign({
Bucket: bucket,
Key: object,
}, fields), cb);
}
const {
bucket,
object,
bodySize,
bodyContent,
partNumbers,
invalidPartNumbers,
} = config;
beforeEach(function beforeF(done) {
bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3;
withV4(sigCfg => { //eslint-disable-line
let bucketUtil;
let s3;
async.waterfall([
next => s3.createBucket({ Bucket: bucket }, err => next(err)),
next => s3.createMultipartUpload({ Bucket: bucket,
Key: object }, (err, data) => {
checkNoError(err);
this.currentTest.UploadId = data.UploadId;
return next();
}),
next => async.mapSeries(partNumbers, (partNumber, callback) => {
const uploadPartParams = {
Bucket: bucket,
Key: object,
PartNumber: partNumber + 1,
UploadId: this.currentTest.UploadId,
Body: generateContent(partNumber + 1),
};
beforeEach(function beforeF(done) {
bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3;
return s3.uploadPart(uploadPartParams,
(err, data) => {
if (err) {
return callback(err);
async.waterfall([
next => s3.createBucket({ Bucket: bucket }, err => next(err)),
next => s3.createMultipartUpload({ Bucket: bucket,
Key: object }, (err, data) => {
checkNoError(err);
this.currentTest.UploadId = data.UploadId;
return next();
}),
next => async.mapSeries(partNumbers, (partNumber, callback) => {
let allocAmount = bodySize + partNumber + 1;
if (config.signature === 'for empty object') {
allocAmount = 0;
}
return callback(null, data.ETag);
});
}, (err, results) => {
checkNoError(err);
ETags = results;
return next();
}),
next => {
const params = {
Bucket: bucket,
Key: object,
MultipartUpload: {
Parts: partNumbers.map(partNumber => ({
ETag: ETags[partNumber],
const uploadPartParams = {
Bucket: bucket,
Key: object,
PartNumber: partNumber + 1,
})),
UploadId: this.currentTest.UploadId,
Body: generateContent(allocAmount, bodyContent),
};
return s3.uploadPart(uploadPartParams,
(err, data) => {
if (err) {
return callback(err);
}
return callback(null, data.ETag);
});
}, (err, results) => {
checkNoError(err);
ETags = results;
return next();
}),
next => {
const params = {
Bucket: bucket,
Key: object,
MultipartUpload: {
Parts: partNumbers.map(partNumber => ({
ETag: ETags[partNumber],
PartNumber: partNumber + 1,
})),
},
UploadId: this.currentTest.UploadId,
};
return s3.completeMultipartUpload(params, next);
},
UploadId: this.currentTest.UploadId,
};
return s3.completeMultipartUpload(params, next);
},
], err => {
checkNoError(err);
done();
});
});
], err => {
checkNoError(err);
done();
});
});
afterEach(done => {
async.waterfall([
next => s3.deleteObject({ Bucket: bucket, Key: object },
err => next(err)),
next => s3.deleteBucket({ Bucket: bucket }, err => next(err)),
], done);
});
afterEach(done => {
async.waterfall([
next => s3.deleteObject({ Bucket: bucket, Key: object },
err => next(err)),
next => s3.deleteBucket({ Bucket: bucket }, err => next(err)),
], done);
});
it('should return the total size of the object ' +
'when --part-number is not used', done => {
const totalSize = partNumbers.reduce((total, current) =>
total + (bodySize + current + 1), 0);
headObject({}, (err, data) => {
checkNoError(err);
assert.equal(totalSize, data.ContentLength);
done();
});
});
it('should return the total size of the object ' +
'when --part-number is not used', done => {
const totalSize = config.meta.computeTotalSize(partNumbers, bodySize);
partNumbers.forEach(part => {
it(`should return the size of part ${part + 1} ` +
`when --part-number is set to ${part + 1}`, done => {
const partNumber = Number.parseInt(part, 0) + 1;
const partSize = bodySize + partNumber;
headObject({ PartNumber: partNumber }, (err, data) => {
checkNoError(err);
assert.equal(partSize, data.ContentLength);
done();
s3.headObject({ Bucket: bucket, Key: object }, (err, data) => {
checkNoError(err);
assert.equal(totalSize, data.ContentLength);
done();
});
});
partNumbers.forEach(part => {
it(`should return the size of part ${part + 1} ` +
`when --part-number is set to ${part + 1}`, done => {
const partNumber = Number.parseInt(part, 0) + 1;
const partSize = bodySize + partNumber;
s3.headObject({ Bucket: bucket, Key: object, PartNumber: partNumber }, (err, data) => {
checkNoError(err);
if (data.ContentLength === 0) {
done();
}
assert.equal(partSize, data.ContentLength);
done();
});
});
});
invalidPartNumbers.forEach(part => {
it(`should return an error when --part-number is set to ${part}`,
done => {
s3.headObject({ Bucket: bucket, Key: object, PartNumber: part }, (err, data) => {
checkError(err, 400, 'BadRequest');
assert.strictEqual(data, null);
done();
});
});
});
it('when incorrect --part-number is used', done => {
bucketUtil = new BucketUtility('default', sigCfg);
s3 = bucketUtil.s3;
s3.headObject({ Bucket: bucket, Key: object, PartNumber: partNumbers.length + 1 },
(err, data) => {
if (config.meta.objectIsEmpty) {
// returns metadata for the only empty part
checkNoError(err);
assert.strictEqual(data.ContentLength, 0);
done();
} else {
// returns a 416 error
// the error response does not contain the actual
// statusCode instead it has '416'
checkError(err, 416, 416);
assert.strictEqual(data, null);
done();
}
});
});
});
});
invalidPartNumbers.forEach(part => {
it(`should return an error when --part-number is set to ${part}`,
done => {
headObject({ PartNumber: part }, (err, data) => {
checkError(err, 400, 'BadRequest');
assert.strictEqual(data, null);
done();
});
});
});
it('should return an error when incorrect --part-number is used',
done => {
headObject({ PartNumber: partNumbers.length + 1 },
(err, data) => {
// the error response does not contain the actual
// statusCode instead it has '416'
checkError(err, 416, 416);
assert.strictEqual(data, null);
done();
});
});
});
});

View File

@@ -106,7 +106,7 @@ describe('GET object retention', () => {
s3.getObjectRetention({
Bucket: bucketName,
Key: objectName,
VersionId: '000000000000',
VersionId: '012345678901234567890123456789012',
}, err => {
checkError(err, 'NoSuchVersion', 404);
done();

View File

@@ -98,7 +98,7 @@ describe('PUT object legal hold', () => {
s3.putObjectLegalHold({
Bucket: bucket,
Key: key,
VersionId: '000000000000',
VersionId: '012345678901234567890123456789012',
LegalHold: mockLegalHold.on,
}, err => {
checkError(err, 'NoSuchVersion', 404);

View File

@@ -79,7 +79,7 @@ describe('PUT object retention', () => {
s3.putObjectRetention({
Bucket: bucketName,
Key: objectName,
VersionId: '000000000000',
VersionId: '012345678901234567890123456789012',
Retention: retentionConfig,
}, err => {
checkError(err, 'NoSuchVersion', 404);

View File

@@ -0,0 +1,40 @@
const { maximumAllowedPartCount } = require('../../../../../constants');
const canonicalObjectConfig = {
bucket: 'mpu-test-bucket-canonical-object',
object: 'mpu-test-object-canonical',
bodySize: 1024 * 1024 * 5,
bodyContent: 'a',
howManyParts: 3,
partNumbers: Array.from(Array(3).keys()), // 3 corresponds to howManyParts
invalidPartNumbers: [-1, 0, maximumAllowedPartCount + 1],
signature: 'for canonical object',
meta: {
computeTotalSize: (partNumbers, bodySize) => partNumbers.reduce((total, current) =>
total + bodySize + current + 1
, 0),
objectIsEmpty: false,
},
};
const emptyObjectConfig = {
bucket: 'mpu-test-bucket-empty-object',
object: 'mpu-test-object-empty',
bodySize: 0,
bodyContent: null,
howManyParts: 1,
partNumbers: Array.from(Array(1).keys()), // 1 corresponds to howManyParts
invalidPartNumbers: [-1, 0, maximumAllowedPartCount + 1],
signature: 'for empty object',
meta: {
computeTotalSize: () => 0,
objectIsEmpty: true,
},
};
const objectConfigs = [
canonicalObjectConfig,
emptyObjectConfig,
];
module.exports = objectConfigs;

View File

@@ -27,6 +27,7 @@ const testData = 'testkey data';
const testDataMd5 = crypto.createHash('md5')
.update(testData, 'utf-8')
.digest('hex');
const emptyContentsMd5 = 'd41d8cd98f00b204e9800998ecf8427e';
const testMd = {
'md-model-version': 2,
'owner-display-name': 'Bart',
@@ -60,6 +61,17 @@ const testMd = {
},
};
function checkObjectData(s3, objectKey, dataValue, done) {
s3.getObject({
Bucket: TEST_BUCKET,
Key: objectKey,
}, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Body.toString(), dataValue);
done();
});
}
/** makeBackbeatRequest - utility function to generate a request going
* through backbeat route
* @param {object} params - params for making request
@@ -416,8 +428,8 @@ describeSkipIfAWS('backbeat routes', () => {
});
});
it('should remove old object data locations if version is overwritten',
done => {
it('should remove old object data locations if version is overwritten ' +
'with same contents', done => {
let oldLocation;
const testKeyOldData = `${testKey}-old-data`;
async.waterfall([next => {
@@ -491,14 +503,8 @@ describeSkipIfAWS('backbeat routes', () => {
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// give some time for the async deletes to complete
setTimeout(() => s3.getObject({
Bucket: TEST_BUCKET,
Key: testKey,
}, (err, data) => {
assert.ifError(err);
assert.strictEqual(data.Body.toString(), testData);
next();
}), 1000);
setTimeout(() => checkObjectData(s3, testKey, testData, next),
1000);
}, next => {
// check that the object copy referencing the old data
// locations is unreadable, confirming that the old
@@ -516,6 +522,89 @@ describeSkipIfAWS('backbeat routes', () => {
done();
});
});
it('should remove old object data locations if version is overwritten ' +
'with empty contents', done => {
let oldLocation;
const testKeyOldData = `${testKey}-old-data`;
async.waterfall([next => {
// put object's data locations
makeBackbeatRequest({
method: 'PUT', bucket: TEST_BUCKET,
objectKey: testKey,
resourceType: 'data',
headers: {
'content-length': testData.length,
'content-md5': testDataMd5,
'x-scal-canonical-id': testArn,
},
authCredentials: backbeatAuthCredentials,
requestBody: testData }, next);
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// put object metadata
const newMd = Object.assign({}, testMd);
newMd.location = JSON.parse(response.body);
oldLocation = newMd.location;
makeBackbeatRequest({
method: 'PUT', bucket: TEST_BUCKET,
objectKey: testKey,
resourceType: 'metadata',
authCredentials: backbeatAuthCredentials,
requestBody: JSON.stringify(newMd),
}, next);
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// put another object whose metadata references the
// same data locations; we will attempt to retrieve
// this object at the end of the test to confirm that
// its locations have been deleted
const oldDataMd = Object.assign({}, testMd);
oldDataMd.location = oldLocation;
makeBackbeatRequest({
method: 'PUT', bucket: TEST_BUCKET,
objectKey: testKeyOldData,
resourceType: 'metadata',
authCredentials: backbeatAuthCredentials,
requestBody: JSON.stringify(oldDataMd),
}, next);
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// overwrite the original object version with an empty location
const newMd = Object.assign({}, testMd);
newMd['content-length'] = 0;
newMd['content-md5'] = emptyContentsMd5;
newMd.location = null;
makeBackbeatRequest({
method: 'PUT', bucket: TEST_BUCKET,
objectKey: testKey,
resourceType: 'metadata',
authCredentials: backbeatAuthCredentials,
requestBody: JSON.stringify(newMd),
}, next);
}, (response, next) => {
assert.strictEqual(response.statusCode, 200);
// give some time for the async deletes to complete
setTimeout(() => checkObjectData(s3, testKey, '', next),
1000);
}, next => {
// check that the object copy referencing the old data
// locations is unreadable, confirming that the old
// data locations have been deleted
s3.getObject({
Bucket: TEST_BUCKET,
Key: testKeyOldData,
}, err => {
assert(err, 'expected error to get object with old data ' +
'locations, got success');
next();
});
}], err => {
assert.ifError(err);
done();
});
});
it('should not remove data locations on replayed metadata PUT',
done => {
let serializedNewMd;

View File

@@ -3,14 +3,15 @@ const assert = require('assert');
const BucketInfo = require('arsenal').models.BucketInfo;
const getReplicationInfo =
require('../../../../lib/api/apiUtils/object/getReplicationInfo');
const { makeAuthInfo } = require('../../helpers');
function _getObjectReplicationInfo(replicationConfig) {
function _getObjectReplicationInfo(replicationConfig, authInfo, isDeleteMarker) {
const bucketInfo = new BucketInfo(
'testbucket', 'someCanonicalId', 'accountDisplayName',
new Date().toJSON(),
null, null, null, null, null, null, null, null, null,
replicationConfig);
return getReplicationInfo('fookey', bucketInfo, true, 123, null, null);
return getReplicationInfo('fookey', bucketInfo, true, 123, null, null, authInfo, isDeleteMarker);
}
describe('getReplicationInfo helper', () => {
@@ -40,6 +41,65 @@ describe('getReplicationInfo helper', () => {
});
});
it('should get replication info when action coming from a non-lifecycle session', () => {
const replicationConfig = {
role: 'arn:aws:iam::root:role/s3-replication-role',
rules: [{
prefix: '',
enabled: true,
storageClass: 'awsbackend',
}],
destination: 'tosomewhere',
};
const authInfo = makeAuthInfo('accessKey1', null, 'another-session');
const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, true);
assert.deepStrictEqual(replicationInfo, {
status: 'PENDING',
backends: [{
site: 'awsbackend',
status: 'PENDING',
dataStoreVersionId: '',
}],
content: ['METADATA'],
destination: 'tosomewhere',
storageClass: 'awsbackend',
role: 'arn:aws:iam::root:role/s3-replication-role',
storageType: 'aws_s3',
});
});
it('should get replication info when action coming from a lifecycle session ' +
'but action is not delete marker', () => {
const replicationConfig = {
role: 'arn:aws:iam::root:role/s3-replication-role',
rules: [{
prefix: '',
enabled: true,
storageClass: 'awsbackend',
}],
destination: 'tosomewhere',
};
const authInfo = makeAuthInfo('accessKey1', null, 'backbeat-lifecycle');
const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, false);
assert.deepStrictEqual(replicationInfo, {
status: 'PENDING',
backends: [{
site: 'awsbackend',
status: 'PENDING',
dataStoreVersionId: '',
}],
content: ['METADATA'],
destination: 'tosomewhere',
storageClass: 'awsbackend',
role: 'arn:aws:iam::root:role/s3-replication-role',
storageType: 'aws_s3',
});
});
it('should not get replication info when rules are disabled', () => {
const replicationConfig = {
role: 'arn:aws:iam::root:role/s3-replication-role',
@@ -53,4 +113,21 @@ describe('getReplicationInfo helper', () => {
const replicationInfo = _getObjectReplicationInfo(replicationConfig);
assert.deepStrictEqual(replicationInfo, undefined);
});
it('should not get replication info when action coming from lifecycle session', () => {
const replicationConfig = {
role: 'arn:aws:iam::root:role/s3-replication-role',
rules: [{
prefix: '',
enabled: true,
storageClass: 'awsbackend',
}],
destination: 'tosomewhere',
};
const authInfo = makeAuthInfo('accessKey1', null, 'backbeat-lifecycle');
const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, true);
assert.deepStrictEqual(replicationInfo, undefined);
});
});

View File

@@ -38,4 +38,16 @@ describe('Check if location keys have changed between object locations', () => {
const curr = [{ key: 'ddd' }, { key: 'eee' }, { key: 'fff' }];
assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
});
it('should return true if curr location is null', () => {
const prev = [{ key: 'ddd' }, { key: 'eee' }, { key: 'fff' }];
const curr = null;
assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
});
it('should return true if both prev and curr locations are null', () => {
const prev = null;
const curr = null;
assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
});
});

View File

@@ -0,0 +1,41 @@
const assert = require('assert');
const { isLifecycleSession } =
require('../../../../lib/api/apiUtils/authorization/permissionChecks.js');
const tests = [
{
arn: 'arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle',
description: 'a role assumed by lifecycle service',
expectedResult: true,
},
{
arn: undefined,
description: 'undefined',
expectedResult: false,
},
{
arn: '',
description: 'empty',
expectedResult: false,
},
{
arn: 'arn:aws:iam::257038443293:user/bart',
description: 'a user',
expectedResult: false,
},
{
arn: 'arn:aws:sts::257038443293:assumed-role/rolename/other-service',
description: 'a role assumed by another service',
expectedResult: false,
},
];
describe('authInfoHelper', () => {
tests.forEach(t => {
it(`should return ${t.expectedResult} if arn is ${t.description}`, () => {
const result = isLifecycleSession(t.arn);
assert.equal(result, t.expectedResult);
});
});
});

View File

@@ -313,6 +313,9 @@ describe('bucketGet API V2', () => {
assert.strictEqual(keyCount, keysReturned);
// assert the results from tests
test.assertion(result);
if (result.ListBucketResult.IsTruncated && result.ListBucketResult.IsTruncated[0] === 'false') {
assert.strictEqual(result.ListBucketResult.NextContinuationToken, undefined);
}
done();
});
});

View File

@@ -1630,6 +1630,78 @@ describe('Multipart Upload API', () => {
});
});
it('should leave orphaned data when overwriting an object part during completeMPU',
done => {
const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024);
const overWritePart = Buffer.from('Overwrite content', 'utf8');
let uploadId;
async.waterfall([
next => bucketPut(authInfo, bucketPutRequest, log, next),
(corsHeaders, next) => initiateMultipartUpload(authInfo,
initiateRequest, log, next),
(result, corsHeaders, next) => parseString(result, next),
(json, next) => {
uploadId = json.InitiateMultipartUploadResult.UploadId[0];
const requestObj = {
bucketName,
namespace,
objectKey,
headers: { host: `${bucketName}.s3.amazonaws.com` },
url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`,
query: {
partNumber: '1',
uploadId,
},
};
const partRequest = new DummyRequest(requestObj, fullSizedPart);
objectPutPart(authInfo, partRequest, undefined, log, (err, partCalculatedHash) => {
assert.deepStrictEqual(err, null);
next(null, requestObj, partCalculatedHash);
});
},
(requestObj, partCalculatedHash, next) => {
assert.deepStrictEqual(ds[1].value, fullSizedPart);
async.parallel([
done => {
const partRequest = new DummyRequest(requestObj, overWritePart);
objectPutPart(authInfo, partRequest, undefined, log, err => {
assert.deepStrictEqual(err, null);
done();
});
},
done => {
const completeBody = '<CompleteMultipartUpload>' +
'<Part>' +
'<PartNumber>1</PartNumber>' +
`<ETag>"${partCalculatedHash}"</ETag>` +
'</Part>' +
'</CompleteMultipartUpload>';
const completeRequest = {
bucketName,
namespace,
objectKey,
parsedHost: 's3.amazonaws.com',
url: `/${objectKey}?uploadId=${uploadId}`,
headers: { host: `${bucketName}.s3.amazonaws.com` },
query: { uploadId },
post: completeBody,
};
completeMultipartUpload(authInfo, completeRequest, log, done);
},
], err => next(err));
},
],
err => {
assert.deepStrictEqual(err, null);
assert.strictEqual(ds[0], undefined);
assert.deepStrictEqual(ds[1].value, fullSizedPart);
assert.deepStrictEqual(ds[2].value, overWritePart);
done();
});
});
it('should throw an error on put of an object part with an invalid ' +
'uploadId', done => {
const testUploadId = 'invalidUploadID';
@@ -1829,12 +1901,22 @@ describe('complete mpu with versioning', () => {
},
(eTag, testUploadId, next) => {
const origPutObject = metadataBackend.putObject;
let callCount = 0;
metadataBackend.putObject =
(bucketName, objName, objVal, params, log, cb) => {
assert.strictEqual(params.replayId, testUploadId);
metadataBackend.putObject = origPutObject;
metadataBackend.putObject(
bucketName, objName, objVal, params, log, cb);
(putBucketName, objName, objVal, params, log, cb) => {
if (callCount === 0) {
// first putObject sets the completeInProgress flag in the overview key
assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`);
assert.strictEqual(
objName, `overview${splitter}${objectKey}${splitter}${testUploadId}`);
assert.strictEqual(objVal.completeInProgress, true);
} else {
assert.strictEqual(params.replayId, testUploadId);
metadataBackend.putObject = origPutObject;
}
origPutObject(
putBucketName, objName, objVal, params, log, cb);
callCount += 1;
};
const parts = [{ partNumber: 1, eTag }];
const completeRequest = _createCompleteMpuRequest(testUploadId,
@@ -1891,12 +1973,22 @@ describe('complete mpu with versioning', () => {
},
(eTag, testUploadId, next) => {
const origPutObject = metadataBackend.putObject;
let callCount = 0;
metadataBackend.putObject =
(bucketName, objName, objVal, params, log, cb) => {
assert.strictEqual(params.replayId, testUploadId);
metadataBackend.putObject = origPutObject;
metadataBackend.putObject(
bucketName, objName, objVal, params, log, cb);
(putBucketName, objName, objVal, params, log, cb) => {
if (callCount === 0) {
// first putObject sets the completeInProgress flag in the overview key
assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`);
assert.strictEqual(
objName, `overview${splitter}${objectKey}${splitter}${testUploadId}`);
assert.strictEqual(objVal.completeInProgress, true);
} else {
assert.strictEqual(params.replayId, testUploadId);
metadataBackend.putObject = origPutObject;
}
origPutObject(
putBucketName, objName, objVal, params, log, cb);
callCount += 1;
};
const parts = [{ partNumber: 1, eTag }];
const completeRequest = _createCompleteMpuRequest(testUploadId,

View File

@@ -99,7 +99,8 @@ describe('objectGet API', () => {
url: `/${bucketName}/${objectName}`,
}, postBody);
const testDate = new Date(2022, 6, 3).toISOString();
const threeDaysMilliSecs = 3 * 24 * 60 * 60 * 1000;
const testDate = new Date(Date.now() + threeDaysMilliSecs).toISOString();
it('should get the object metadata with valid retention info', done => {
bucketPut(authInfo, testPutBucketRequestObjectLock, log, () => {

View File

@@ -64,7 +64,7 @@ function timeDiff(startTime) {
return milliseconds;
}
function makeAuthInfo(accessKey, userName) {
function makeAuthInfo(accessKey, userName, sessionName) {
const canIdMap = {
accessKey1: '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7'
+ 'cd47ef2be',
@@ -92,6 +92,11 @@ function makeAuthInfo(accessKey, userName) {
params.arn = `arn:aws:iam::${shortid}:user/${userName}`;
}
if (sessionName) {
params.IAMdisplayName = `[assumedRole] rolename:${sessionName}`;
params.arn = `arn:aws:sts::${shortid}:assumed-role/rolename/${sessionName}`;
}
return new AuthInfo(params);
}
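
With the new third argument, tests can fabricate a lifecycle-session identity; a short sketch matching the getReplicationInfo unit tests added in this compare:

const authInfo = makeAuthInfo('accessKey1', null, 'backbeat-lifecycle');
// authInfo.getArn() is now of the form
//   arn:aws:sts::<accountId>:assumed-role/rolename/backbeat-lifecycle
// which isLifecycleSession() recognizes, so delete markers created under
// this identity are not scheduled for replication.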

View File

@@ -361,6 +361,35 @@ arraybuffer.slice@~0.0.7:
optionalDependencies:
ioctl "^2.0.2"
arsenal@scality/Arsenal#7.10.11:
version "7.10.11"
resolved "https://codeload.github.com/scality/Arsenal/tar.gz/4303cd8f5b3dc9f1f47d382e5355c7e357e4c5f2"
dependencies:
"@hapi/joi" "^15.1.0"
JSONStream "^1.0.0"
agentkeepalive "^4.1.3"
ajv "6.12.2"
async "~2.1.5"
base-x "3.0.8"
base62 "2.0.1"
debug "~2.6.9"
diskusage "^1.1.1"
ioredis "4.9.5"
ipaddr.js "1.9.1"
level "~5.0.1"
level-sublevel "~6.6.5"
node-forge "^0.7.1"
prom-client "10.2.3"
simple-glob "^0.2"
socket.io "~2.3.0"
socket.io-client "~2.3.0"
utf8 "2.1.2"
uuid "^3.0.1"
werelogs scality/werelogs#8.1.0
xml2js "~0.4.23"
optionalDependencies:
ioctl "^2.0.2"
arsenal@scality/Arsenal#7.10.9:
version "7.10.9"
resolved "https://codeload.github.com/scality/Arsenal/tar.gz/2f40ff388327b17728926bf99fe82b0e6a14a01d"
@@ -4736,6 +4765,16 @@ vaultclient@scality/vaultclient#7.10.2:
werelogs scality/werelogs#8.1.0
xml2js "0.4.19"
vaultclient@scality/vaultclient#7.10.6:
version "7.10.6"
resolved "https://codeload.github.com/scality/vaultclient/tar.gz/98cccf64e6976eb1c35cd6908cc560ab6fbf6969"
dependencies:
agentkeepalive "^4.1.3"
arsenal scality/Arsenal#7.10.11
commander "2.20.0"
werelogs scality/werelogs#8.1.0
xml2js "0.4.19"
verror@1.10.0:
version "1.10.0"
resolved "https://registry.yarnpkg.com/verror/-/verror-1.10.0.tgz#3a105ca17053af55d6e270c1f8288682e18da400"