Compare commits
12 Commits
developmen...temp/CDSRV

| Author | SHA1 | Date |
|---|---|---|
| williamlardier | a155026abe | |
| williamlardier | 36e69648fd | |
| Jonathan Gramain | 20f9662f54 | |
| Taylor McKinnon | a77b40a2a2 | |
| Jonathan Gramain | 75410f0ba4 | |
| Artem Bakalov | 73c364acc8 | |
| Artem Bakalov | c37007782c | |
| Jonathan Gramain | 08c8599e1d | |
| Jonathan Gramain | 9bdab63401 | |
| Jonathan Gramain | 189f3cf41b | |
| Jonathan Gramain | 30067dfc73 | |
| Nicolas Humbert | ad3d9a9f5f | |
@@ -173,6 +173,10 @@ const constants = {
         'bucket',
     ],
     allowedUtapiEventFilterStates: ['allow', 'deny'],
+    // The AWS assumed Role resource type
+    assumedRoleArnResourceType: 'assumed-role',
+    // Session name of the backbeat lifecycle assumed role session.
+    backbeatLifecycleSessionName: 'backbeat-lifecycle',
 };
 
 module.exports = constants;

@@ -1,7 +1,8 @@
 const { evaluators, actionMaps, RequestContext } = require('arsenal').policies;
 const constants = require('../../../../constants');
 
-const { allAuthedUsersId, bucketOwnerActions, logId, publicId } = constants;
+const { allAuthedUsersId, bucketOwnerActions, logId, publicId,
+    assumedRoleArnResourceType, backbeatLifecycleSessionName } = constants;
 
 // whitelist buckets to allow public read on objects
 const publicReadBuckets = process.env.ALLOW_PUBLIC_READ_BUCKETS ?

@@ -364,10 +365,34 @@ function validatePolicyResource(bucketName, policy) {
     });
 }
 
+/** isLifecycleSession - check if it is the Lifecycle assumed role session arn.
+ * @param {string} arn - Amazon resource name - example:
+ * arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle
+ * @return {boolean} true if Lifecycle assumed role session arn, false if not.
+ */
+function isLifecycleSession(arn) {
+    if (!arn) {
+        return false;
+    }
+
+    const arnSplits = arn.split(':');
+    const service = arnSplits[2];
+
+    const resourceNames = arnSplits[arnSplits.length - 1].split('/');
+
+    const resourceType = resourceNames[0];
+    const sessionName = resourceNames[resourceNames.length - 1];
+
+    return (service === 'sts' &&
+        resourceType === assumedRoleArnResourceType &&
+        sessionName === backbeatLifecycleSessionName);
+}
+
 module.exports = {
     isBucketAuthorized,
     isObjAuthorized,
     checkBucketAcls,
     checkObjectAcls,
     validatePolicyResource,
+    isLifecycleSession,
 };

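For readers skimming the diff, a quick usage sketch of the new helper. The require path matches the unit test added at the bottom of this PR; the expected results follow directly from the function body above:

```js
const { isLifecycleSession } =
    require('./lib/api/apiUtils/authorization/permissionChecks.js');

// STS assumed-role session named 'backbeat-lifecycle' => true
isLifecycleSession('arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle');
// IAM user ARN: service segment is 'iam', not 'sts' => false
isLifecycleSession('arn:aws:iam::257038443293:user/bart');
// Missing ARN => false
isLifecycleSession(undefined);
```
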
@@ -136,9 +136,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
         size,
         headers,
         isDeleteMarker,
-        replicationInfo: getReplicationInfo(objectKey, bucketMD, false, size),
+        replicationInfo: getReplicationInfo(objectKey, bucketMD, false, size, null, null, authInfo, isDeleteMarker),
         log,
     };
 
     if (!isDeleteMarker) {
         metadataStoreParams.contentType = request.headers['content-type'];
         metadataStoreParams.cacheControl = request.headers['cache-control'];

@@ -1,4 +1,5 @@
 const s3config = require('../../../Config').config;
+const { isLifecycleSession } = require('../authorization/permissionChecks.js');
 
 function _getBackend(objectMD, site) {
     const backends = objectMD ? objectMD.replicationInfo.backends : [];

@@ -63,14 +64,22 @@ function _getReplicationInfo(rule, replicationConfig, content, operationType,
  * @param {boolean} objSize - The size, in bytes, of the object being PUT
  * @param {string} operationType - The type of operation to replicate
  * @param {object} objectMD - The object metadata
+ * @param {AuthInfo} [authInfo] - authentication info of object owner
+ * @param {boolean} [isDeleteMarker] - whether creating a delete marker
  * @return {undefined}
  */
 function getReplicationInfo(objKey, bucketMD, isMD, objSize, operationType,
-    objectMD) {
+    objectMD, authInfo, isDeleteMarker) {
     const content = isMD || objSize === 0 ? ['METADATA'] : ['DATA', 'METADATA'];
     const config = bucketMD.getReplicationConfiguration();
     // If bucket does not have a replication configuration, do not replicate.
     if (config) {
+        // If delete an object due to a lifecycle action,
+        // the delete marker is not replicated to the destination buckets.
+        if (isDeleteMarker && authInfo && isLifecycleSession(authInfo.getArn())) {
+            return undefined;
+        }
+
        const rule = config.rules.find(rule =>
            (objKey.startsWith(rule.prefix) && rule.enabled));
        if (rule) {

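A minimal sketch of the new short-circuit, mirroring the unit tests added at the bottom of this PR. It assumes a `bucketMD` with an enabled replication configuration is in scope, and `lifecycleAuthInfo`/`otherAuthInfo` are hypothetical `AuthInfo` objects for a lifecycle and a non-lifecycle session respectively:

```js
// Delete marker created by the lifecycle session: not queued for replication.
getReplicationInfo('fookey', bucketMD, true, 123, null, null,
    lifecycleAuthInfo, true); // => undefined

// Same delete marker from any other session: replicated as before.
getReplicationInfo('fookey', bucketMD, true, 123, null, null,
    otherAuthInfo, true); // => { status: 'PENDING', ... }
```
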
@@ -8,12 +8,13 @@
  *
  * @param {array|string|null} prev - list of keys from the object being
  * overwritten
- * @param {array} curr - list of keys to be used in composing current object
+ * @param {array|null} curr - list of keys to be used in composing
+ * current object
  * @returns {boolean} true if no key in `curr` is present in `prev`,
  * false otherwise
  */
 function locationKeysHaveChanged(prev, curr) {
-    if (!prev || prev.length === 0) {
+    if (!prev || prev.length === 0 || !curr) {
         return true;
     }
     // backwards compatibility check if object is of model version 2

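The behavior of the updated guard, as pinned down by the unit tests added later in this PR:

```js
locationKeysHaveChanged([{ key: 'aaa' }], [{ key: 'bbb' }]); // true - keys differ
locationKeysHaveChanged([{ key: 'aaa' }], null);             // true - new case: curr is null
                                                             //        (e.g. overwrite with empty contents)
locationKeysHaveChanged(null, null);                         // true - no previous locations
```
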
@@ -193,7 +193,7 @@ class ObjectLockInfo {
      * @returns {bool} - True if the given timestamp is after the policy expiration date or if no expiration date is set
      */
     isExtended(timestamp) {
-        return timestamp !== undefined && (this.date === null || moment(timestamp).isAfter(this.date));
+        return timestamp !== undefined && (this.date === null || moment(timestamp).isSameOrAfter(this.date));
     }
 
     /**

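The practical effect of switching to `isSameOrAfter` (standard moment.js semantics), sketched: re-applying an identical retention date now counts as a valid extension instead of being rejected.

```js
const moment = require('moment');
const d = '2030-01-01T00:00:00.000Z';

moment(d).isAfter(d);       // false - old check: an equal date was rejected
moment(d).isSameOrAfter(d); // true  - new check: re-applying the same date passes
```
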
@@ -157,6 +157,22 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                 }
                 return next(errors.MalformedXML, destBucket);
             },
+        function markOverviewForCompletion(destBucket, objMD, mpuBucket, jsonList,
+            storedMetadata, location, mpuOverviewKey, next) {
+            return services.metadataMarkMPObjectForCompletion({
+                bucketName: mpuBucket.getName(),
+                objectKey,
+                uploadId,
+                splitter,
+                storedMetadata,
+            }, log, err => {
+                if (err) {
+                    return next(err);
+                }
+                return next(null, destBucket, objMD, mpuBucket,
+                    jsonList, storedMetadata, location, mpuOverviewKey);
+            });
+        },
         function retrieveParts(destBucket, objMD, mpuBucket, jsonList,
             storedMetadata, location, mpuOverviewKey, next) {
             return services.getMPUparts(mpuBucket.getName(), uploadId, log,

@@ -3,6 +3,8 @@ const crypto = require('crypto');
 const async = require('async');
 const { parseString } = require('xml2js');
 const { auth, errors, versioning, s3middleware, policies } = require('arsenal');
+const { data } = require('../data/wrapper');
+const logger = require('../utilities/logger');
 
 const escapeForXml = s3middleware.escapeForXml;
 const { pushMetric } = require('../utapi/utilities');

@@ -191,9 +193,11 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
     const skipError = new Error('skip');
     const objectLockedError = new Error('object locked');
 
+    let deleteFromStorage = [];
+
     // doing 5 requests at a time. note that the data wrapper
-    // will do 5 parallel requests to data backend to delete parts
-    return async.forEachLimit(inPlay, 5, (entry, moveOn) => {
+    // will do 50 parallel requests to data backend to delete parts
+    return async.forEachLimit(inPlay, 50, (entry, moveOn) => {
         async.waterfall([
             callback => {
                 let decodedVersionId;

@@ -287,9 +291,14 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
                 const deleteInfo = {};
                 if (options && options.deleteData) {
                     deleteInfo.deleted = true;
-                    return services.deleteObject(bucketName, objMD,
-                        entry.key, options, log, err =>
-                        callback(err, objMD, deleteInfo));
+                    return services.deleteObjectWithDeferredStorage(bucketName, objMD,
+                        entry.key, options, log, (err, res, objectsToDelete) => {
+                            if (err) {
+                                return callback(err);
+                            }
+                            deleteFromStorage = deleteFromStorage.concat(objectsToDelete);
+                            return callback(null, objMD, deleteInfo);
+                        });
                 }
                 deleteInfo.newDeleteMarker = true;
                 // This call will create a delete-marker

@@ -340,9 +349,25 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
         },
         // end of forEach func
         err => {
             log.trace('finished deleting objects', { numOfObjectsRemoved });
-            return next(err, quietSetting, errorResults, numOfObjectsRemoved,
-                successfullyDeleted, totalContentLengthDeleted, bucket);
+            // Batch delete all objects
+            const onDone = () => next(err, quietSetting, errorResults, numOfObjectsRemoved,
+                successfullyDeleted, totalContentLengthDeleted, bucket);
+
+            if (err && deleteFromStorage.length === 0) {
+                log.trace('no objects to delete from data backend');
+                return onDone();
+            }
+            // If error but we have objects in the list, delete them to ensure
+            // consistent state.
+            log.trace('deleting objects from data backend');
+            const deleteLog = logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
+            return data.batchDelete(deleteFromStorage, null, null, deleteLog, err => {
+                if (err) {
+                    log.error('error deleting objects from data backend', { error: err });
+                    return onDone();
+                }
+                return onDone();
+            });
         });
 }

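A sketch of the deferred-deletion flow introduced above. The `data.batchDelete` call shape is copied from the hunk itself; `locations` is a stand-in for the accumulated `deleteFromStorage` array:

```js
// After all per-key metadata deletes have run, flush data deletes in one batch.
const deleteLog = logger.newRequestLoggerFromSerializedUids(log.getSerializedUids());
data.batchDelete(locations, null, null, deleteLog, err => {
    if (err) {
        // errors are logged, not surfaced: metadata is already consistent
        log.error('error deleting objects from data backend', { error: err });
    }
});
```
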
@@ -130,10 +130,12 @@ function objectHead(authInfo, request, log, callback) {
             return callback(errors.BadRequest, corsHeaders);
         }
         const partSize = getPartSize(objMD, partNumber);
-        if (!partSize) {
+        const isEmptyObject = objLength === 0;
+        if (!partSize && !isEmptyObject) {
             return callback(errors.InvalidRange, corsHeaders);
         }
-        responseHeaders['content-length'] = partSize;
+
+        responseHeaders['content-length'] = isEmptyObject ? 0 : partSize;
         const partsCount = getPartCountFromMd5(objMD);
         if (partsCount) {
             responseHeaders['x-amz-mp-parts-count'] = partsCount;

@@ -236,7 +236,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
                 res.controllingLocationConstraint;
             return next(null, dataLocator, destBucketMD,
                 destObjLocationConstraint, copyObjectSize,
-                sourceVerId, sourceLocationConstraintName);
+                sourceVerId, sourceLocationConstraintName, splitter);
         });
     },
     function goGetData(

@@ -246,6 +246,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
         copyObjectSize,
         sourceVerId,
         sourceLocationConstraintName,
+        splitter,
         next,
     ) {
         data.uploadPartCopy(

@@ -268,12 +269,12 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
             }
             return next(null, destBucketMD, locations, eTag,
                 copyObjectSize, sourceVerId, serverSideEncryption,
-                lastModified);
+                lastModified, splitter);
         });
     },
     function getExistingPartInfo(destBucketMD, locations, totalHash,
         copyObjectSize, sourceVerId, serverSideEncryption, lastModified,
-        next) {
+        splitter, next) {
         const partKey =
             `${uploadId}${constants.splitter}${paddedPartNumber}`;
         metadata.getObjectMD(mpuBucketName, partKey, {}, log,

@@ -298,12 +299,12 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
             }
             return next(null, destBucketMD, locations, totalHash,
                 prevObjectSize, copyObjectSize, sourceVerId,
-                serverSideEncryption, lastModified, oldLocations);
+                serverSideEncryption, lastModified, oldLocations, splitter);
         });
     },
     function storeNewPartMetadata(destBucketMD, locations, totalHash,
         prevObjectSize, copyObjectSize, sourceVerId, serverSideEncryption,
-        lastModified, oldLocations, next) {
+        lastModified, oldLocations, splitter, next) {
         const metaStoreParams = {
             partNumber: paddedPartNumber,
             contentMD5: totalHash,

@@ -319,20 +320,58 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
                     { error: err, method: 'storeNewPartMetadata' });
                 return next(err);
             }
+            return next(null, locations, oldLocations, destBucketMD, totalHash,
+                lastModified, sourceVerId, serverSideEncryption,
+                prevObjectSize, copyObjectSize, splitter);
         });
     },
+    function checkCanDeleteOldLocations(partLocations, oldLocations, destBucketMD,
+        totalHash, lastModified, sourceVerId, serverSideEncryption,
+        prevObjectSize, copyObjectSize, splitter, next) {
+        if (!oldLocations) {
+            return next(null, oldLocations, destBucketMD, totalHash,
+                lastModified, sourceVerId, serverSideEncryption,
+                prevObjectSize, copyObjectSize);
+        }
+        return services.isCompleteMPUInProgress({
+            bucketName: destBucketName,
+            objectKey: destObjectKey,
+            uploadId,
+            splitter,
+        }, log, (err, completeInProgress) => {
+            if (err) {
+                return next(err, destBucketMD);
+            }
+            let oldLocationsToDelete = oldLocations;
+            // Prevent deletion of old data if a completeMPU
+            // is already in progress because then there is no
+            // guarantee that the old location will not be the
+            // committed one.
+            if (completeInProgress) {
+                log.warn('not deleting old locations because CompleteMPU is in progress', {
+                    method: 'objectPutCopyPart::checkCanDeleteOldLocations',
+                    bucketName: destBucketName,
+                    objectKey: destObjectKey,
+                    uploadId,
+                    partLocations,
+                    oldLocations,
+                });
+                oldLocationsToDelete = null;
+            }
+            return next(null, oldLocationsToDelete, destBucketMD, totalHash,
+                lastModified, sourceVerId, serverSideEncryption,
+                prevObjectSize, copyObjectSize);
+        });
+    },
-    function cleanupExistingData(oldLocations, destBucketMD, totalHash,
+    function cleanupExistingData(oldLocationsToDelete, destBucketMD, totalHash,
         lastModified, sourceVerId, serverSideEncryption,
         prevObjectSize, copyObjectSize, next) {
         // Clean up the old data now that new metadata (with new
         // data locations) has been stored
-        if (oldLocations) {
+        if (oldLocationsToDelete) {
             const delLog = logger.newRequestLoggerFromSerializedUids(
                 log.getSerializedUids());
-            return data.batchDelete(oldLocations, request.method, null,
+            return data.batchDelete(oldLocationsToDelete, request.method, null,
                 delLog, err => {
                     if (err) {
                         // if error, log the error and move on as it is not

@@ -13,6 +13,7 @@ const kms = require('../kms/wrapper');
 const metadata = require('../metadata/wrapper');
 const { pushMetric } = require('../utapi/utilities');
+const logger = require('../utilities/logger');
 const services = require('../services');
 const locationConstraintCheck
     = require('./apiUtils/object/locationConstraintCheck');
 const writeContinue = require('../utilities/writeContinue');

@@ -243,19 +244,19 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
             }
             return next(null, destinationBucket,
                 objectLocationConstraint, cipherBundle,
-                partKey, prevObjectSize, oldLocations, partInfo);
+                partKey, prevObjectSize, oldLocations, partInfo, splitter);
         });
     },
     // Store in data backend.
     (destinationBucket, objectLocationConstraint, cipherBundle,
-        partKey, prevObjectSize, oldLocations, partInfo, next) => {
+        partKey, prevObjectSize, oldLocations, partInfo, splitter, next) => {
         // NOTE: set oldLocations to null so we do not batchDelete for now
         if (partInfo && partInfo.dataStoreType === 'azure') {
             // skip to storing metadata
             return next(null, destinationBucket, partInfo,
                 partInfo.dataStoreETag,
                 cipherBundle, partKey, prevObjectSize, null,
-                objectLocationConstraint);
+                objectLocationConstraint, splitter);
         }
         const objectContext = {
             bucketName,

@@ -275,12 +276,13 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
             }
             return next(null, destinationBucket, dataGetInfo, hexDigest,
                 cipherBundle, partKey, prevObjectSize, oldLocations,
-                objectLocationConstraint);
+                objectLocationConstraint, splitter);
         });
     },
-    // Store data locations in metadata and delete any overwritten data.
+    // Store data locations in metadata and delete any overwritten
+    // data if completeMPU hasn't been initiated yet.
     (destinationBucket, dataGetInfo, hexDigest, cipherBundle, partKey,
-        prevObjectSize, oldLocations, objectLocationConstraint, next) => {
+        prevObjectSize, oldLocations, objectLocationConstraint, splitter, next) => {
         // Use an array to be consistent with objectPutCopyPart where there
         // could be multiple locations.
         const partLocations = [dataGetInfo];

@@ -310,19 +312,54 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
             });
             return next(err, destinationBucket);
         }
+        return next(null, partLocations, oldLocations, objectLocationConstraint,
+            destinationBucket, hexDigest, prevObjectSize, splitter);
         });
     },
+    (partLocations, oldLocations, objectLocationConstraint, destinationBucket,
+        hexDigest, prevObjectSize, splitter, next) => {
+        if (!oldLocations) {
+            return next(null, oldLocations, objectLocationConstraint,
+                destinationBucket, hexDigest, prevObjectSize);
+        }
+        return services.isCompleteMPUInProgress({
+            bucketName,
+            objectKey,
+            uploadId,
+            splitter,
+        }, log, (err, completeInProgress) => {
+            if (err) {
+                return next(err, destinationBucket);
+            }
+            let oldLocationsToDelete = oldLocations;
+            // Prevent deletion of old data if a completeMPU
+            // is already in progress because then there is no
+            // guarantee that the old location will not be the
+            // committed one.
+            if (completeInProgress) {
+                log.warn('not deleting old locations because CompleteMPU is in progress', {
+                    method: 'objectPutPart::metadata.getObjectMD',
+                    bucketName,
+                    objectKey,
+                    uploadId,
+                    partLocations,
+                    oldLocations,
+                });
+                oldLocationsToDelete = null;
+            }
+            return next(null, oldLocationsToDelete, objectLocationConstraint,
+                destinationBucket, hexDigest, prevObjectSize);
+        });
+    },
     // Clean up any old data now that new metadata (with new
     // data locations) has been stored.
-    (oldLocations, objectLocationConstraint, destinationBucket, hexDigest,
+    (oldLocationsToDelete, objectLocationConstraint, destinationBucket, hexDigest,
         prevObjectSize, next) => {
-        if (oldLocations) {
+        if (oldLocationsToDelete) {
             log.trace('overwriting mpu part, deleting data');
             const delLog = logger.newRequestLoggerFromSerializedUids(
                 log.getSerializedUids());
-            return data.batchDelete(oldLocations, request.method,
+            return data.batchDelete(oldLocationsToDelete, request.method,
                 objectLocationConstraint, delLog, err => {
                     if (err) {
                         // if error, log the error and move on as it is not

lib/services.js (125 changed lines)
@@ -315,6 +315,57 @@ const services = {
         });
     },
 
+    /**
+     * Deletes objects from a bucket, but do not delete it from storage yet
+     * @param {string} bucketName - bucket in which objectMD is stored
+     * @param {object} objectMD - object's metadata
+     * @param {string} objectKey - object key name
+     * @param {object} options - other instructions, such as { versionId } to
+     *                           delete a specific version of the object
+     * @param {Log} log - logger instance
+     * @param {function} cb - callback from async.waterfall in objectGet
+     * @return {undefined}
+     */
+    deleteObjectWithDeferredStorage(bucketName, objectMD, objectKey, options, log, cb) {
+        log.trace('deleting object from bucket');
+        assert.strictEqual(typeof bucketName, 'string');
+        assert.strictEqual(typeof objectMD, 'object');
+
+        function deleteMDandData() {
+            return metadata.deleteObjectMD(bucketName, objectKey, options, log,
+                (err, res) => {
+                    if (err) {
+                        return cb(err, res);
+                    }
+                    log.trace('deleteObject: metadata delete OK');
+                    if (objectMD.location === null) {
+                        return cb(null, res);
+                    }
+
+                    // We store all objects to delete, for each location
+                    // separately.
+                    let objectToDeleteFromStorage = [];
+                    if (!Array.isArray(objectMD.location)) {
+                        objectToDeleteFromStorage.push(objectMD.location);
+                    } else {
+                        objectToDeleteFromStorage = objectMD.location;
+                    }
+                    return cb(null, res, objectToDeleteFromStorage);
+                });
+        }
+
+        const objGetInfo = objectMD.location;
+        // special case that prevents azure blocks from unecessary deletion
+        // will return null if no need
+        return data.protectAzureBlocks(bucketName, objectKey, objGetInfo,
+            log, err => {
+                if (err) {
+                    return cb(err);
+                }
+                return deleteMDandData();
+            });
+    },
+
     /**
      * Gets list of objects in bucket
      * @param {object} bucketName - bucket in which objectMetadata is stored

@@ -451,6 +502,80 @@ const services = {
         });
     },
 
+    /**
+     * Mark the MPU overview key with a flag when starting the
+     * CompleteMPU operation, to be checked by "put part" operations
+     *
+     * @param {object} params - params object
+     * @param {string} params.bucketName - name of MPU bucket
+     * @param {string} params.objectKey - object key
+     * @param {string} params.uploadId - upload ID
+     * @param {string} params.splitter - splitter for this overview key
+     * @param {object} params.storedMetadata - original metadata of the overview key
+     * @param {Logger} log - Logger object
+     * @param {function} cb - callback(err)
+     * @return {undefined}
+     */
+    metadataMarkMPObjectForCompletion(params, log, cb) {
+        assert.strictEqual(typeof params, 'object');
+        assert.strictEqual(typeof params.bucketName, 'string');
+        assert.strictEqual(typeof params.objectKey, 'string');
+        assert.strictEqual(typeof params.uploadId, 'string');
+        assert.strictEqual(typeof params.splitter, 'string');
+        assert.strictEqual(typeof params.storedMetadata, 'object');
+        const splitter = params.splitter;
+        const longMPUIdentifier =
+            `overview${splitter}${params.objectKey}${splitter}${params.uploadId}`;
+        const multipartObjectMD = Object.assign({}, params.storedMetadata);
+        multipartObjectMD.completeInProgress = true;
+        metadata.putObjectMD(params.bucketName, longMPUIdentifier, multipartObjectMD,
+            {}, log, err => {
+                if (err) {
+                    log.error('error from metadata', { error: err });
+                    return cb(err);
+                }
+                return cb();
+            });
+    },
+
+    /**
+     * Returns if a CompleteMPU operation is in progress for this
+     * object, by looking at the `completeInProgress` flag stored in
+     * the overview key
+     *
+     * @param {object} params - params object
+     * @param {string} params.bucketName - bucket name where object should be stored
+     * @param {string} params.objectKey - object key
+     * @param {string} params.uploadId - upload ID
+     * @param {string} params.splitter - splitter for this overview key
+     * @param {object} log - request logger instance
+     * @param {function} cb - callback(err, {bool} completeInProgress)
+     * @return {undefined}
+     */
+    isCompleteMPUInProgress(params, log, cb) {
+        assert.strictEqual(typeof params, 'object');
+        assert.strictEqual(typeof params.bucketName, 'string');
+        assert.strictEqual(typeof params.objectKey, 'string');
+        assert.strictEqual(typeof params.uploadId, 'string');
+        assert.strictEqual(typeof params.splitter, 'string');
+
+        const mpuBucketName = `${constants.mpuBucketPrefix}${params.bucketName}`;
+        const splitter = params.splitter;
+        const mpuOverviewKey =
+            `overview${splitter}${params.objectKey}${splitter}${params.uploadId}`;
+        return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, {}, log,
+            (err, res) => {
+                if (err) {
+                    log.error('error getting the overview object from mpu bucket', {
+                        error: err,
+                        method: 'services.isCompleteMPUInProgress',
+                        params,
+                    });
+                    return cb(err);
+                }
+                return cb(null, Boolean(res.completeInProgress));
+            });
+    },
+
     /**
      * Checks whether bucket exists, multipart upload

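How the two new service methods cooperate, sketched with names taken from the hunks above; the surrounding waterfall variables (`bucketName`, `objectKey`, `uploadId`, `splitter`, `oldLocations`, `storedMetadata`) are assumed in scope as in `objectPutPart` and `completeMultipartUpload`:

```js
// CompleteMPU first stamps the overview key...
services.metadataMarkMPObjectForCompletion({
    bucketName: mpuBucket.getName(), objectKey, uploadId, splitter, storedMetadata,
}, log, err => { /* ...then proceeds to assemble the parts... */ });

// ...and any concurrent part overwrite checks the stamp before
// batch-deleting the part locations it would otherwise reclaim.
services.isCompleteMPUInProgress({ bucketName, objectKey, uploadId, splitter },
    log, (err, completeInProgress) => {
        // keep the old locations if CompleteMPU may still commit one of them
        const oldLocationsToDelete = completeInProgress ? null : oldLocations;
        // only oldLocationsToDelete is ever passed to data.batchDelete()
    });
```
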
@@ -1,6 +1,6 @@
 {
   "name": "s3",
-  "version": "7.70.11",
+  "version": "7.70.11-2",
   "description": "S3 connector",
   "main": "index.js",
   "engines": {

@@ -210,5 +210,39 @@ describe('Complete MPU', () => {
             });
         });
     });
+
+    describe('with re-upload of part during CompleteMPU execution', () => {
+        let uploadId;
+        let eTag;
+
+        beforeEach(() => _initiateMpuAndPutOnePart()
+            .then(result => {
+                uploadId = result.uploadId;
+                eTag = result.eTag;
+            })
+        );
+
+        it('should complete the MPU successfully and leave a readable object', done => {
+            async.parallel([
+                doneReUpload => s3.uploadPart({
+                    Bucket: bucket,
+                    Key: key,
+                    PartNumber: 1,
+                    UploadId: uploadId,
+                    Body: 'foo',
+                }, err => {
+                    // in case the CompleteMPU finished earlier,
+                    // we may get a NoSuchKey error, so just
+                    // ignore it
+                    if (err && err.code === 'NoSuchKey') {
+                        return doneReUpload();
+                    }
+                    return doneReUpload(err);
+                }),
+                doneComplete => _completeMpuAndCheckVid(
+                    uploadId, eTag, undefined, doneComplete),
+            ], done);
+        });
+    });
     });
 });

@@ -577,6 +577,72 @@ describe('Object Part Copy', () => {
                     checkNoError(err);
                 });
             });
+
+            it('should not corrupt object if overwriting an existing part by copying a part ' +
+            'while the MPU is being completed', () => {
+                // AWS response etag for this completed MPU
+                const finalObjETag = '"db77ebbae9e9f5a244a26b86193ad818-1"';
+                process.stdout.write('Putting first part in MPU test');
+                return s3.uploadPartCopy({ Bucket: destBucketName,
+                    Key: destObjName,
+                    CopySource: `${sourceBucketName}/${sourceObjName}`,
+                    PartNumber: 1,
+                    UploadId: uploadId,
+                }).promise().then(res => {
+                    assert.strictEqual(res.ETag, etag);
+                    assert(res.LastModified);
+                }).then(() => {
+                    process.stdout.write('Overwriting first part in MPU test and completing MPU ' +
+                        'at the same time');
+                    return Promise.all([
+                        s3.uploadPartCopy({
+                            Bucket: destBucketName,
+                            Key: destObjName,
+                            CopySource: `${sourceBucketName}/${sourceObjName}`,
+                            PartNumber: 1,
+                            UploadId: uploadId,
+                        }).promise().catch(err => {
+                            // in case the CompleteMPU finished
+                            // earlier, we may get a NoSuchKey error,
+                            // so just ignore it and resolve with a
+                            // special value, otherwise re-throw the
+                            // error
+                            if (err && err.code === 'NoSuchKey') {
+                                return Promise.resolve(null);
+                            }
+                            throw err;
+                        }),
+                        s3.completeMultipartUpload({
+                            Bucket: destBucketName,
+                            Key: destObjName,
+                            UploadId: uploadId,
+                            MultipartUpload: {
+                                Parts: [
+                                    { ETag: etag, PartNumber: 1 },
+                                ],
+                            },
+                        }).promise(),
+                    ]);
+                }).then(([uploadRes, completeRes]) => {
+                    // if upload succeeded before CompleteMPU finished
+                    if (uploadRes !== null) {
+                        assert.strictEqual(uploadRes.ETag, etag);
+                        assert(uploadRes.LastModified);
+                    }
+                    assert.strictEqual(completeRes.Bucket, destBucketName);
+                    assert.strictEqual(completeRes.Key, destObjName);
+                    assert.strictEqual(completeRes.ETag, finalObjETag);
+                }).then(() => {
+                    process.stdout.write('Getting object put by MPU with ' +
+                        'overwrite part');
+                    return s3.getObject({
+                        Bucket: destBucketName,
+                        Key: destObjName,
+                    }).promise();
+                }).then(res => {
+                    assert.strictEqual(res.ETag, finalObjETag);
+                });
+            });
         });
 
         it('should return an error if no such upload initiated',

@@ -3,18 +3,7 @@ const async = require('async');
 
 const withV4 = require('../support/withV4');
 const BucketUtility = require('../../lib/utility/bucket-util');
-const { maximumAllowedPartCount } = require('../../../../../constants');
-
-const bucket = 'mpu-test-bucket';
-const object = 'mpu-test-object';
-
-const bodySize = 1024 * 1024 * 5;
-const bodyContent = 'a';
-const howManyParts = 3;
-const partNumbers = Array.from(Array(howManyParts).keys());
-const invalidPartNumbers = [-1, 0, maximumAllowedPartCount + 1];
-
-let ETags = [];
+const objectConfigs = require('../support/objectConfigs');
 
 function checkError(err, statusCode, code) {
     assert.strictEqual(err.statusCode, statusCode);

@@ -26,22 +15,28 @@ function checkNoError(err) {
         `Expected success, got error ${JSON.stringify(err)}`);
 }
 
-function generateContent(partNumber) {
-    return Buffer.alloc(bodySize + partNumber, bodyContent);
+function generateContent(size, bodyContent) {
+    return Buffer.alloc(size, bodyContent);
 }
 
 describe('Part size tests with object head', () => {
-    withV4(sigCfg => {
+    objectConfigs.forEach(config => {
+        describe(config.signature, () => {
+            let ETags = [];
+
+            const {
+                bucket,
+                object,
+                bodySize,
+                bodyContent,
+                partNumbers,
+                invalidPartNumbers,
+            } = config;
+
+            withV4(sigCfg => { //eslint-disable-line
                 let bucketUtil;
                 let s3;
 
-        function headObject(fields, cb) {
-            s3.headObject(Object.assign({
-                Bucket: bucket,
-                Key: object,
-            }, fields), cb);
-        }
-
                 beforeEach(function beforeF(done) {
                     bucketUtil = new BucketUtility('default', sigCfg);
                     s3 = bucketUtil.s3;

@@ -55,12 +50,16 @@ describe('Part size tests with object head', () => {
                             return next();
                         }),
                         next => async.mapSeries(partNumbers, (partNumber, callback) => {
+                            let allocAmount = bodySize + partNumber + 1;
+                            if (config.signature === 'for empty object') {
+                                allocAmount = 0;
+                            }
                             const uploadPartParams = {
                                 Bucket: bucket,
                                 Key: object,
                                 PartNumber: partNumber + 1,
                                 UploadId: this.currentTest.UploadId,
-                                Body: generateContent(partNumber + 1),
+                                Body: generateContent(allocAmount, bodyContent),
                             };
 
                             return s3.uploadPart(uploadPartParams,

@@ -105,10 +104,11 @@ describe('Part size tests with object head', () => {
 
                 it('should return the total size of the object ' +
                 'when --part-number is not used', done => {
-                    const totalSize = partNumbers.reduce((total, current) =>
-                        total + (bodySize + current + 1), 0);
-                    headObject({}, (err, data) => {
+                    const totalSize = config.meta.computeTotalSize(partNumbers, bodySize);
+
+                    s3.headObject({ Bucket: bucket, Key: object }, (err, data) => {
                         checkNoError(err);
+
                         assert.equal(totalSize, data.ContentLength);
                         done();
                     });

@@ -119,8 +119,12 @@ describe('Part size tests with object head', () => {
                     `when --part-number is set to ${part + 1}`, done => {
                         const partNumber = Number.parseInt(part, 0) + 1;
                         const partSize = bodySize + partNumber;
-                        headObject({ PartNumber: partNumber }, (err, data) => {
+
+                        s3.headObject({ Bucket: bucket, Key: object, PartNumber: partNumber }, (err, data) => {
                             checkNoError(err);
+                            if (data.ContentLength === 0) {
+                                done();
+                            }
                             assert.equal(partSize, data.ContentLength);
                             done();
                         });

@@ -130,7 +134,7 @@ describe('Part size tests with object head', () => {
                 invalidPartNumbers.forEach(part => {
                     it(`should return an error when --part-number is set to ${part}`,
                     done => {
-                        headObject({ PartNumber: part }, (err, data) => {
+                        s3.headObject({ Bucket: bucket, Key: object, PartNumber: part }, (err, data) => {
                             checkError(err, 400, 'BadRequest');
                             assert.strictEqual(data, null);
                             done();

@@ -138,15 +142,26 @@
                         });
                     });
 
-                it('should return an error when incorrect --part-number is used',
-                    done => {
-                        headObject({ PartNumber: partNumbers.length + 1 },
+                it('when incorrect --part-number is used', done => {
+                    bucketUtil = new BucketUtility('default', sigCfg);
+                    s3 = bucketUtil.s3;
+                    s3.headObject({ Bucket: bucket, Key: object, PartNumber: partNumbers.length + 1 },
                         (err, data) => {
+                            if (config.meta.objectIsEmpty) {
+                                // returns metadata for the only empty part
+                                checkNoError(err);
+                                assert.strictEqual(data.ContentLength, 0);
+                                done();
+                            } else {
+                                // returns a 416 error
                                 // the error response does not contain the actual
                                 // statusCode instead it has '416'
                                 checkError(err, 416, 416);
                                 assert.strictEqual(data, null);
                                 done();
+                            }
                         });
                 });
             });
         });
     });
 });

@@ -0,0 +1,40 @@
+const { maximumAllowedPartCount } = require('../../../../../constants');
+
+const canonicalObjectConfig = {
+    bucket: 'mpu-test-bucket-canonical-object',
+    object: 'mpu-test-object-canonical',
+    bodySize: 1024 * 1024 * 5,
+    bodyContent: 'a',
+    howManyParts: 3,
+    partNumbers: Array.from(Array(3).keys()), // 3 corresponds to howManyParts
+    invalidPartNumbers: [-1, 0, maximumAllowedPartCount + 1],
+    signature: 'for canonical object',
+    meta: {
+        computeTotalSize: (partNumbers, bodySize) => partNumbers.reduce((total, current) =>
+            total + bodySize + current + 1
+        , 0),
+        objectIsEmpty: false,
+    },
+};
+
+const emptyObjectConfig = {
+    bucket: 'mpu-test-bucket-empty-object',
+    object: 'mpu-test-object-empty',
+    bodySize: 0,
+    bodyContent: null,
+    howManyParts: 1,
+    partNumbers: Array.from(Array(1).keys()), // 1 corresponds to howManyParts
+    invalidPartNumbers: [-1, 0, maximumAllowedPartCount + 1],
+    signature: 'for empty object',
+    meta: {
+        computeTotalSize: () => 0,
+        objectIsEmpty: true,
+    },
+};
+
+const objectConfigs = [
+    canonicalObjectConfig,
+    emptyObjectConfig,
+];
+
+module.exports = objectConfigs;

@@ -27,6 +27,7 @@ const testData = 'testkey data';
 const testDataMd5 = crypto.createHash('md5')
     .update(testData, 'utf-8')
     .digest('hex');
+const emptyContentsMd5 = 'd41d8cd98f00b204e9800998ecf8427e';
 const testMd = {
     'md-model-version': 2,
     'owner-display-name': 'Bart',

@@ -60,6 +61,17 @@ const testMd = {
     },
 };
 
+function checkObjectData(s3, objectKey, dataValue, done) {
+    s3.getObject({
+        Bucket: TEST_BUCKET,
+        Key: objectKey,
+    }, (err, data) => {
+        assert.ifError(err);
+        assert.strictEqual(data.Body.toString(), dataValue);
+        done();
+    });
+}
+
 /** makeBackbeatRequest - utility function to generate a request going
  * through backbeat route
  * @param {object} params - params for making request

@@ -416,7 +428,8 @@ describeSkipIfAWS('backbeat routes', () => {
             });
         });
 
-        it('should remove old object data locations if version is overwritten', done => {
+        it('should remove old object data locations if version is overwritten ' +
+        'with same contents', done => {
             let oldLocation;
             const testKeyOldData = `${testKey}-old-data`;
             async.waterfall([next => {

@@ -496,14 +509,96 @@ describeSkipIfAWS('backbeat routes', () => {
             }, (response, next) => {
                 assert.strictEqual(response.statusCode, 200);
                 // give some time for the async deletes to complete
-                setTimeout(() => s3.getObject({
+                setTimeout(() => checkObjectData(s3, testKey, testData, next),
+                    1000);
+            }, next => {
+                // check that the object copy referencing the old data
+                // locations is unreadable, confirming that the old
+                // data locations have been deleted
+                s3.getObject({
                     Bucket: TEST_BUCKET,
-                    Key: testKey,
-                }, (err, data) => {
-                    assert.ifError(err);
-                    assert.strictEqual(data.Body.toString(), testData);
+                    Key: testKeyOldData,
+                }, err => {
+                    assert(err, 'expected error to get object with old data ' +
+                        'locations, got success');
                     next();
-                }), 1000);
+                });
             }], err => {
                 assert.ifError(err);
                 done();
             });
         });
+
+        it('should remove old object data locations if version is overwritten ' +
+        'with empty contents', done => {
+            let oldLocation;
+            const testKeyOldData = `${testKey}-old-data`;
+            async.waterfall([next => {
+                // put object's data locations
+                makeBackbeatRequest({
+                    method: 'PUT', bucket: TEST_BUCKET,
+                    objectKey: testKey,
+                    resourceType: 'data',
+                    headers: {
+                        'content-length': testData.length,
+                        'content-md5': testDataMd5,
+                        'x-scal-canonical-id': testArn,
+                    },
+                    authCredentials: backbeatAuthCredentials,
+                    requestBody: testData }, next);
+            }, (response, next) => {
+                assert.strictEqual(response.statusCode, 200);
+                // put object metadata
+                const newMd = Object.assign({}, testMd);
+                newMd.location = JSON.parse(response.body);
+                oldLocation = newMd.location;
+                makeBackbeatRequest({
+                    method: 'PUT', bucket: TEST_BUCKET,
+                    objectKey: testKey,
+                    resourceType: 'metadata',
+                    queryObj: {
+                        versionId: versionIdUtils.encode(testMd.versionId),
+                    },
+                    authCredentials: backbeatAuthCredentials,
+                    requestBody: JSON.stringify(newMd),
+                }, next);
+            }, (response, next) => {
+                assert.strictEqual(response.statusCode, 200);
+                // put another object which metadata reference the
+                // same data locations, we will attempt to retrieve
+                // this object at the end of the test to confirm that
+                // its locations have been deleted
+                const oldDataMd = Object.assign({}, testMd);
+                oldDataMd.location = oldLocation;
+                makeBackbeatRequest({
+                    method: 'PUT', bucket: TEST_BUCKET,
+                    objectKey: testKeyOldData,
+                    resourceType: 'metadata',
+                    authCredentials: backbeatAuthCredentials,
+                    requestBody: JSON.stringify(oldDataMd),
+                }, next);
+            }, (response, next) => {
+                assert.strictEqual(response.statusCode, 200);
+                // overwrite the original object version with an empty location
+                const newMd = Object.assign({}, testMd);
+                newMd['content-length'] = 0;
+                newMd['content-md5'] = emptyContentsMd5;
+                newMd.location = null;
+                makeBackbeatRequest({
+                    method: 'PUT', bucket: TEST_BUCKET,
+                    objectKey: testKey,
+                    resourceType: 'metadata',
+                    queryObj: {
+                        versionId: versionIdUtils.encode(testMd.versionId),
+                    },
+                    authCredentials: backbeatAuthCredentials,
+                    requestBody: JSON.stringify(newMd),
+                }, next);
+            }, (response, next) => {
+                assert.strictEqual(response.statusCode, 200);
+                // give some time for the async deletes to complete
+                setTimeout(() => checkObjectData(s3, testKey, '', next),
+                    1000);
+            }, next => {
+                // check that the object copy referencing the old data
+                // locations is unreadable, confirming that the old

@@ -3,14 +3,15 @@ const assert = require('assert');
 const BucketInfo = require('arsenal').models.BucketInfo;
 const getReplicationInfo =
     require('../../../../lib/api/apiUtils/object/getReplicationInfo');
+const { makeAuthInfo } = require('../../helpers');
 
-function _getObjectReplicationInfo(replicationConfig) {
+function _getObjectReplicationInfo(replicationConfig, authInfo, isDeleteMarker) {
     const bucketInfo = new BucketInfo(
         'testbucket', 'someCanonicalId', 'accountDisplayName',
         new Date().toJSON(),
         null, null, null, null, null, null, null, null, null,
         replicationConfig);
-    return getReplicationInfo('fookey', bucketInfo, true, 123, null, null);
+    return getReplicationInfo('fookey', bucketInfo, true, 123, null, null, authInfo, isDeleteMarker);
 }
 
 describe('getReplicationInfo helper', () => {

@@ -40,6 +41,65 @@ describe('getReplicationInfo helper', () => {
         });
     });
 
+    it('should get replication info when action comming from a non-lifecycle session', () => {
+        const replicationConfig = {
+            role: 'arn:aws:iam::root:role/s3-replication-role',
+            rules: [{
+                prefix: '',
+                enabled: true,
+                storageClass: 'awsbackend',
+            }],
+            destination: 'tosomewhere',
+        };
+
+        const authInfo = makeAuthInfo('accessKey1', null, 'another-session');
+        const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, true);
+
+        assert.deepStrictEqual(replicationInfo, {
+            status: 'PENDING',
+            backends: [{
+                site: 'awsbackend',
+                status: 'PENDING',
+                dataStoreVersionId: '',
+            }],
+            content: ['METADATA'],
+            destination: 'tosomewhere',
+            storageClass: 'awsbackend',
+            role: 'arn:aws:iam::root:role/s3-replication-role',
+            storageType: 'aws_s3',
+        });
+    });
+
+    it('should get replication info when action comming from a lifecycle session ' +
+    'but action is not delete marker', () => {
+        const replicationConfig = {
+            role: 'arn:aws:iam::root:role/s3-replication-role',
+            rules: [{
+                prefix: '',
+                enabled: true,
+                storageClass: 'awsbackend',
+            }],
+            destination: 'tosomewhere',
+        };
+
+        const authInfo = makeAuthInfo('accessKey1', null, 'backbeat-lifecycle');
+        const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, false);
+
+        assert.deepStrictEqual(replicationInfo, {
+            status: 'PENDING',
+            backends: [{
+                site: 'awsbackend',
+                status: 'PENDING',
+                dataStoreVersionId: '',
+            }],
+            content: ['METADATA'],
+            destination: 'tosomewhere',
+            storageClass: 'awsbackend',
+            role: 'arn:aws:iam::root:role/s3-replication-role',
+            storageType: 'aws_s3',
+        });
+    });
+
     it('should not get replication info when rules are disabled', () => {
         const replicationConfig = {
             role: 'arn:aws:iam::root:role/s3-replication-role',

@@ -53,4 +113,21 @@ describe('getReplicationInfo helper', () => {
         const replicationInfo = _getObjectReplicationInfo(replicationConfig);
         assert.deepStrictEqual(replicationInfo, undefined);
     });
+
+    it('should not get replication info when action comming from lifecycle session', () => {
+        const replicationConfig = {
+            role: 'arn:aws:iam::root:role/s3-replication-role',
+            rules: [{
+                prefix: '',
+                enabled: true,
+                storageClass: 'awsbackend',
+            }],
+            destination: 'tosomewhere',
+        };
+
+        const authInfo = makeAuthInfo('accessKey1', null, 'backbeat-lifecycle');
+        const replicationInfo = _getObjectReplicationInfo(replicationConfig, authInfo, true);
+
+        assert.deepStrictEqual(replicationInfo, undefined);
+    });
 });

@@ -38,4 +38,16 @@ describe('Check if location keys have changed between object locations', () => {
         const curr = [{ key: 'ddd' }, { key: 'eee' }, { key: 'fff' }];
         assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
     });
+
+    it('should return true if curr location is null', () => {
+        const prev = [{ key: 'ddd' }, { key: 'eee' }, { key: 'fff' }];
+        const curr = null;
+        assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
+    });
+
+    it('should return true if both prev and curr locations are null', () => {
+        const prev = null;
+        const curr = null;
+        assert.strictEqual(locationKeysHaveChanged(prev, curr), true);
+    });
 });

@@ -371,6 +371,19 @@ const policyChangeTestCases = [
         allowed: false,
         allowedWithBypass: true,
     },
+    {
+        desc: 'extending governance policy using same date',
+        from: {
+            mode: 'GOVERNANCE',
+            date: futureDate.toISOString(),
+        },
+        to: {
+            mode: 'GOVERNANCE',
+            date: futureDate.toISOString(),
+        },
+        allowed: true,
+        allowedWithBypass: true,
+    },
     {
         desc: 'removing governance policy',
         from: {

@@ -443,6 +456,19 @@ const policyChangeTestCases = [
         allowed: false,
         allowedWithBypass: false,
     },
+    {
+        desc: 'extending compliance policy with the same date',
+        from: {
+            mode: 'COMPLIANCE',
+            date: futureDate.toISOString(),
+        },
+        to: {
+            mode: 'COMPLIANCE',
+            date: futureDate.toISOString(),
+        },
+        allowed: true,
+        allowedWithBypass: true,
+    },
     {
         desc: 'removing compliance policy',
         from: {

@@ -0,0 +1,41 @@
+const assert = require('assert');
+
+const { isLifecycleSession } =
+    require('../../../../lib/api/apiUtils/authorization/permissionChecks.js');
+
+const tests = [
+    {
+        arn: 'arn:aws:sts::257038443293:assumed-role/rolename/backbeat-lifecycle',
+        description: 'a role assumed by lifecycle service',
+        expectedResult: true,
+    },
+    {
+        arn: undefined,
+        description: 'undefined',
+        expectedResult: false,
+    },
+    {
+        arn: '',
+        description: 'empty',
+        expectedResult: false,
+    },
+    {
+        arn: 'arn:aws:iam::257038443293:user/bart',
+        description: 'a user',
+        expectedResult: false,
+    },
+    {
+        arn: 'arn:aws:sts::257038443293:assumed-role/rolename/other-service',
+        description: 'a role assumed by another service',
+        expectedResult: false,
+    },
+];
+
+describe('authInfoHelper', () => {
+    tests.forEach(t => {
+        it(`should return ${t.expectedResult} if arn is ${t.description}`, () => {
+            const result = isLifecycleSession(t.arn);
+            assert.equal(result, t.expectedResult);
+        });
+    });
+});

@@ -1641,6 +1641,78 @@ describe('Multipart Upload API', () => {
         });
     });
 
+    it('should leave orphaned data when overwriting an object part during completeMPU',
+    done => {
+        const fullSizedPart = crypto.randomBytes(5 * 1024 * 1024);
+        const overWritePart = Buffer.from('Overwrite content', 'utf8');
+        let uploadId;
+
+        async.waterfall([
+            next => bucketPut(authInfo, bucketPutRequest, log, next),
+            (corsHeaders, next) => initiateMultipartUpload(authInfo,
+                initiateRequest, log, next),
+            (result, corsHeaders, next) => parseString(result, next),
+            (json, next) => {
+                uploadId = json.InitiateMultipartUploadResult.UploadId[0];
+                const requestObj = {
+                    bucketName,
+                    namespace,
+                    objectKey,
+                    headers: { host: `${bucketName}.s3.amazonaws.com` },
+                    url: `/${objectKey}?partNumber=1&uploadId=${uploadId}`,
+                    query: {
+                        partNumber: '1',
+                        uploadId,
+                    },
+                };
+                const partRequest = new DummyRequest(requestObj, fullSizedPart);
+                objectPutPart(authInfo, partRequest, undefined, log, (err, partCalculatedHash) => {
+                    assert.deepStrictEqual(err, null);
+                    next(null, requestObj, partCalculatedHash);
+                });
+            },
+            (requestObj, partCalculatedHash, next) => {
+                assert.deepStrictEqual(ds[1].value, fullSizedPart);
+                async.parallel([
+                    done => {
+                        const partRequest = new DummyRequest(requestObj, overWritePart);
+                        objectPutPart(authInfo, partRequest, undefined, log, err => {
+                            assert.deepStrictEqual(err, null);
+                            done();
+                        });
+                    },
+                    done => {
+                        const completeBody = '<CompleteMultipartUpload>' +
+                            '<Part>' +
+                            '<PartNumber>1</PartNumber>' +
+                            `<ETag>"${partCalculatedHash}"</ETag>` +
+                            '</Part>' +
+                            '</CompleteMultipartUpload>';
+
+                        const completeRequest = {
+                            bucketName,
+                            namespace,
+                            objectKey,
+                            parsedHost: 's3.amazonaws.com',
+                            url: `/${objectKey}?uploadId=${uploadId}`,
+                            headers: { host: `${bucketName}.s3.amazonaws.com` },
+                            query: { uploadId },
+                            post: completeBody,
+                        };
+                        completeMultipartUpload(authInfo, completeRequest, log, done);
+                    },
+                ], err => next(err));
+            },
+        ],
+        err => {
+            assert.deepStrictEqual(err, null);
+            assert.strictEqual(ds[0], undefined);
+            assert.deepStrictEqual(ds[1].value, fullSizedPart);
+            assert.deepStrictEqual(ds[2].value, overWritePart);
+            done();
+        });
+    });
+
     it('should throw an error on put of an object part with an invalid ' +
     'uploadId', done => {
         const testUploadId = 'invalidUploadID';

@@ -1841,12 +1913,22 @@ describe('complete mpu with versioning', () => {
             },
             (eTag, testUploadId, next) => {
                 const origPutObject = metadataBackend.putObject;
+                let callCount = 0;
                 metadataBackend.putObject =
-                    (bucketName, objName, objVal, params, log, cb) => {
+                    (putBucketName, objName, objVal, params, log, cb) => {
+                        if (callCount === 0) {
+                            // first putObject sets the completeInProgress flag in the overview key
+                            assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`);
+                            assert.strictEqual(
+                                objName, `overview${splitter}${objectKey}${splitter}${testUploadId}`);
+                            assert.strictEqual(objVal.completeInProgress, true);
+                        } else {
                             assert.strictEqual(params.replayId, testUploadId);
-                        metadataBackend.putObject = origPutObject;
-                        metadataBackend.putObject(
-                            bucketName, objName, objVal, params, log, cb);
+                        }
+                        origPutObject(
+                            putBucketName, objName, objVal, params, log, cb);
+                        callCount += 1;
                     };
                 const parts = [{ partNumber: 1, eTag }];
                 const completeRequest = _createCompleteMpuRequest(testUploadId,

@@ -1903,12 +1985,22 @@ describe('complete mpu with versioning', () => {
             },
             (eTag, testUploadId, next) => {
                 const origPutObject = metadataBackend.putObject;
+                let callCount = 0;
                 metadataBackend.putObject =
-                    (bucketName, objName, objVal, params, log, cb) => {
+                    (putBucketName, objName, objVal, params, log, cb) => {
+                        if (callCount === 0) {
+                            // first putObject sets the completeInProgress flag in the overview key
+                            assert.strictEqual(putBucketName, `${constants.mpuBucketPrefix}${bucketName}`);
+                            assert.strictEqual(
+                                objName, `overview${splitter}${objectKey}${splitter}${testUploadId}`);
+                            assert.strictEqual(objVal.completeInProgress, true);
+                        } else {
                             assert.strictEqual(params.replayId, testUploadId);
-                        metadataBackend.putObject = origPutObject;
-                        metadataBackend.putObject(
-                            bucketName, objName, objVal, params, log, cb);
+                        }
+                        origPutObject(
+                            putBucketName, objName, objVal, params, log, cb);
+                        callCount += 1;
                     };
                 const parts = [{ partNumber: 1, eTag }];
                 const completeRequest = _createCompleteMpuRequest(testUploadId,

@@ -16,8 +16,8 @@ const bucketName = 'bucketname';
 const objectName = 'objectName';
 const postBody = Buffer.from('I am a body', 'utf8');
 
-const date = new Date();
-date.setDate(date.getDate() + 1);
+const expectedMode = 'GOVERNANCE';
+const expectedDate = moment().add(2, 'days').toISOString();
 
 const bucketPutRequest = {
     bucketName,

@@ -36,13 +36,13 @@ const putObjectRequest = new DummyRequest({
 const objectRetentionXmlGovernance = '<Retention ' +
     'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
     '<Mode>GOVERNANCE</Mode>' +
-    `<RetainUntilDate>${date.toISOString()}</RetainUntilDate>` +
+    `<RetainUntilDate>${expectedDate}</RetainUntilDate>` +
     '</Retention>';
 
 const objectRetentionXmlCompliance = '<Retention ' +
     'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
     '<Mode>COMPLIANCE</Mode>' +
-    `<RetainUntilDate>${moment().add(2, 'days').toISOString()}</RetainUntilDate>` +
+    `<RetainUntilDate>${expectedDate}</RetainUntilDate>` +
     '</Retention>';
 
 const objectRetentionXmlGovernanceLonger = '<Retention ' +

@@ -51,6 +51,12 @@ const objectRetentionXmlGovernanceLonger = '<Retention ' +
     `<RetainUntilDate>${moment().add(5, 'days').toISOString()}</RetainUntilDate>` +
     '</Retention>';
 
+const objectRetentionXmlGovernanceShorter = '<Retention ' +
+    'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
+    '<Mode>GOVERNANCE</Mode>' +
+    `<RetainUntilDate>${moment().add(1, 'days').toISOString()}</RetainUntilDate>` +
+    '</Retention>';
+
 const objectRetentionXmlComplianceShorter = '<Retention ' +
     'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
     '<Mode>COMPLIANCE</Mode>' +

@@ -95,8 +101,12 @@ const putObjRetRequestGovernanceLonger = {
     post: objectRetentionXmlGovernanceLonger,
 };
 
-const expectedMode = 'GOVERNANCE';
-const expectedDate = date.toISOString();
+const putObjRetRequestGovernanceShorter = {
+    bucketName,
+    objectKey: objectName,
+    headers: { host: `${bucketName}.s3.amazonaws.com` },
+    post: objectRetentionXmlGovernanceShorter,
+};
 
 describe('putObjectRetention API', () => {
     before(() => cleanup());

@@ -178,13 +188,24 @@ describe('putObjectRetention API', () => {
     + 'GOVERNANCE mode is enabled', done => {
         objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
             assert.ifError(err);
-            return objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
+            return objectPutRetention(authInfo, putObjRetRequestGovernanceShorter, log, err => {
                 assert.deepStrictEqual(err, errors.AccessDenied);
                 done();
             });
         });
     });
 
+    it('should allow update if the x-amz-bypass-governance-retention header is missing and '
+    + 'GOVERNANCE mode is enabled and the same date is used', done => {
+        objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
+            assert.ifError(err);
+            return objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {
+                assert.ifError(err);
+                done();
+            });
+        });
+    });
+
     it('should allow update if the x-amz-bypass-governance-retention header is present and '
     + 'GOVERNANCE mode is enabled', done => {
         objectPutRetention(authInfo, putObjRetRequestGovernance, log, err => {

@@ -66,7 +66,7 @@ function timeDiff(startTime) {
     return milliseconds;
 }
 
-function makeAuthInfo(accessKey, userName) {
+function makeAuthInfo(accessKey, userName, sessionName) {
     const canIdMap = {
         accessKey1: '79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7'
             + 'cd47ef2be',

@@ -94,6 +94,11 @@ function makeAuthInfo(accessKey, userName) {
         params.arn = `arn:aws:iam::${shortid}:user/${userName}`;
     }
 
+    if (sessionName) {
+        params.IAMdisplayName = `[assumedRole] rolename:${sessionName}`;
+        params.arn = `arn:aws:sts::${shortid}:assumed-role/rolename/${sessionName}`;
+    }
+
     return new AuthInfo(params);
 }