Compare commits

...

5 Commits

Author SHA1 Message Date
Vinh Tao 467b51e07b [ci skip] read fix 2016-10-22 02:09:06 +02:00
Vinh Tao 364311acb2 [ci skip] read fix 2016-10-21 19:01:27 +02:00
Vinh Tao 08c70a888c mr linter 2016-10-07 16:10:21 +02:00
Vinh Tao bfd47eb9d5 use master branch Arsenal 2016-10-07 15:51:02 +02:00
Vinh Tao 25eddf38f0 versioning rebased 2016-10-07 15:42:43 +02:00
21 changed files with 342 additions and 152 deletions

View File

@ -110,8 +110,9 @@ export function deleteBucket(bucketMD, bucketName, canonicalID, log, cb) {
return async.waterfall([
function checkForObjectsStep(next) {
return metadata.listObject(bucketName, null, null, null, 1, log,
(err, objectsListRes) => {
const versioning = bucketMD.isVersioningOn();
return metadata.listObject(bucketName, { maxKeys: 1, versioning },
log, (err, objectsListRes) => {
if (err) {
log.error('error from metadata', { error: err });
return next(err);
@ -128,8 +129,8 @@ export function deleteBucket(bucketMD, bucketName, canonicalID, log, cb) {
// delete a bucket even if there are ongoing multipart uploads.
function deleteMPUbucketStep(next) {
const MPUBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
return metadata.listObject(MPUBucketName, null, null, null,
1, log, (err, objectsListRes) => {
return metadata.listObject(MPUBucketName, { maxKeys: 1 }, log,
(err, objectsListRes) => {
// If no shadow bucket ever created, no ongoing MPU's, so
// continue with deletion
if (err && err.NoSuchBucket) {

View File

@ -3,7 +3,9 @@ import constants from '../../constants';
import services from '../services';
import escapeForXML from '../utilities/escapeForXML';
import { errors } from 'arsenal';
import { errors, versioning } from 'arsenal';
const VSUtils = versioning.VersioningUtils;
// Sample XML response:
/* <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
@ -29,6 +31,178 @@ import { errors } from 'arsenal';
</CommonPrefixes>
</ListBucketResult>*/
/**
 * Format a standard (non-versioned) bucket listing as an S3
 * ListBucketResult XML document.
 * @param {string} bucketName - name of the listed bucket
 * @param {object} listParams - parameters the listing was requested with
 * @param {string} encoding - 'url' to url-encode object keys
 * @param {object} list - listing result from metadata
 * @return {string} the XML payload
 */
function formatXML(bucketName, listParams, encoding, list) {
    const xml = [
        '<?xml version="1.0" encoding="UTF-8"?>',
        '<ListBucketResult xmlns="http://s3.amazonaws.com/doc/' +
            '2006-03-01/">',
        `<Name>${bucketName}</Name>`,
    ];
    const tags = [
        ['Prefix', listParams.prefix],
        ['NextMarker', list.NextMarker],
        ['Marker', listParams.marker],
        ['MaxKeys', listParams.maxKeys],
        ['Delimiter', listParams.delimiter],
        ['IsTruncated', list.IsTruncated ? 'true' : 'false'],
    ];
    for (const [tag, value] of tags) {
        // emit a self-closing tag when the value is absent
        xml.push(value ? `<${tag}>${value}</${tag}>` : `<${tag}/>`);
    }
    for (const item of list.Contents) {
        const v = item.value;
        const objectKey = encoding === 'url' ?
            querystring.escape(item.key) : escapeForXML(item.key);
        xml.push(
            '<Contents>',
            `<Key>${objectKey}</Key>`,
            `<LastModified>${v.LastModified}</LastModified>`,
            `<ETag>&quot;${v.ETag}&quot;</ETag>`,
            `<Size>${v.Size}</Size>`,
            '<Owner>',
            `<ID>${v.Owner.ID}</ID>`,
            `<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
            '</Owner>',
            `<StorageClass>${v.StorageClass}</StorageClass>`,
            '</Contents>'
        );
    }
    for (const prefix of list.CommonPrefixes) {
        xml.push(`<CommonPrefixes><Prefix>${prefix}</Prefix></CommonPrefixes>`);
    }
    xml.push('</ListBucketResult>');
    return xml.join('');
}
// Sample XML response for versioning:
// http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETVersion.html
/*
<?xml version="1.0" encoding="UTF-8"?>
<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
<Name>bucket</Name>
<Prefix>my</Prefix>
<KeyMarker/>
<VersionIdMarker/>
<MaxKeys>5</MaxKeys>
<IsTruncated>false</IsTruncated>
<Version>
<Key>my-image.jpg</Key>
<VersionId>3/L4kqtJl40Nr8X8gdRQBpUMLUo</VersionId>
<IsLatest>true</IsLatest>
<LastModified>2009-10-12T17:50:30.000Z</LastModified>
<ETag>&quot;fba9dede5f27731c9771645a39863328&quot;</ETag>
<Size>434234</Size>
<StorageClass>STANDARD</StorageClass>
<Owner>
<ID>canonical-user-id</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</Version>
<DeleteMarker>
<Key>my-image.jpg</Key>
<VersionId>03jpff543dhffds434rfdsFDN943fdsFkdmqnh892</VersionId>
<IsLatest>true</IsLatest>
<LastModified>2009-11-12T17:50:30.000Z</LastModified>
<Owner>
<ID>canonical-user-id</ID>
<DisplayName>mtd@amazon.com</DisplayName>
</Owner>
</DeleteMarker>
</ListVersionsResult>
*/
/**
 * Format a versioned bucket listing as an S3 ListVersionsResult XML
 * document, emitting a <Version> element for regular versions and a
 * <DeleteMarker> element for delete markers.
 * @param {string} bucketName - name of the listed bucket
 * @param {object} listParams - parameters the listing was requested with
 * @param {string} encoding - 'url' to url-encode object keys
 * @param {object} list - listing result from metadata; Contents entries
 *   hold encoded versions; LatestVersions (if present) maps each key to
 *   the version id of its latest version
 * @return {string} the XML payload
 */
function formatVersionsXML(bucketName, listParams, encoding, list) {
    const xml = [];
    xml.push(
        '<?xml version="1.0" encoding="UTF-8"?>',
        '<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/' +
            '2006-03-01/">',
        `<Name>${bucketName}</Name>`
    );
    const xmlParams = [
        { tag: 'Prefix', value: listParams.prefix },
        { tag: 'MaxKeys', value: listParams.maxKeys },
        { tag: 'KeyMarker', value: listParams.keyMarker },
        { tag: 'VersionIdMarker', value: listParams.versionIdMarker },
        // IsTruncated is a required element of ListVersionsResult
        // (see the sample response above); it was previously omitted
        { tag: 'IsTruncated', value: list.IsTruncated ? 'true' : 'false' },
    ];
    xmlParams.forEach(param => {
        if (param.value) {
            xml.push(`<${param.tag}>${param.value}</${param.tag}>`);
        } else {
            xml.push(`<${param.tag}/>`);
        }
    });
    const latestVersions = list.LatestVersions;
    list.Contents.forEach(item => {
        const key = encoding === 'url' ?
            querystring.escape(item.key) : escapeForXML(item.key);
        // metadata stores versions in an encoded form; decode to access
        // the version's attributes
        const v = VSUtils.decodeVersion(item.value);
        const size = v['content-length'];
        const ETag = v['content-md5'];
        const lastModified = v['last-modified'];
        const owner = {
            DisplayName: v['owner-display-name'],
            ID: v['owner-id'],
        };
        const storageClass = v['x-amz-storage-class'];
        const versionId = VSUtils.getts(v);
        const isLatest = latestVersions && versionId === latestVersions[key];
        if (VSUtils.isDeleteMarker(v)) {
            xml.push(
                '<DeleteMarker>',
                `<Key>${key}</Key>`,
                `<VersionId>${versionId}</VersionId>`,
                `<IsLatest>${isLatest}</IsLatest>`,
                `<LastModified>${lastModified}</LastModified>`,
                '<Owner>',
                `<ID>${owner.ID}</ID>`,
                `<DisplayName>${owner.DisplayName}</DisplayName>`,
                '</Owner>',
                '</DeleteMarker>'
            );
        } else {
            xml.push(
                '<Version>',
                `<Key>${key}</Key>`,
                `<VersionId>${versionId}</VersionId>`,
                `<IsLatest>${isLatest}</IsLatest>`,
                `<LastModified>${lastModified}</LastModified>`,
                // ETag is returned quoted, consistent with the sample
                // response and with formatXML above
                `<ETag>&quot;${ETag}&quot;</ETag>`,
                `<Size>${size}</Size>`,
                '<Owner>',
                `<ID>${owner.ID}</ID>`,
                `<DisplayName>${owner.DisplayName}</DisplayName>`,
                '</Owner>',
                `<StorageClass>${storageClass}</StorageClass>`,
                '</Version>'
            );
        }
    });
    list.CommonPrefixes.forEach(item => {
        xml.push(
            `<CommonPrefixes><Prefix>${item}</Prefix></CommonPrefixes>`
        );
    });
    xml.push('</ListVersionsResult>');
    return xml.join('');
}
/**
* bucketGet - Return list of objects in bucket
* @param {AuthInfo} authInfo - Instance of AuthInfo class with
@ -58,75 +232,31 @@ export default function bucketGet(authInfo, request, log, callback) {
requestType: 'bucketGet',
log,
};
const listParams = {
maxKeys,
delimiter: params.delimiter,
marker: params.marker,
prefix: params.prefix,
};
services.metadataValidateAuthorization(metadataValParams, err => {
services.metadataValidateAuthorization(metadataValParams, (err, bucket) => {
if (err) {
log.debug('error processing request', { error: err });
return callback(err);
}
const listParams = {
maxKeys,
delimiter: params.delimiter,
marker: params.marker,
prefix: params.prefix,
versioning: bucket.isVersioningOn(),
versions: params.versions === 'true' || params.versions === '',
};
return services.getObjectListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { error: err });
return callback(err);
}
const xml = [];
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<ListBucketResult xmlns="http://s3.amazonaws.com/doc/' +
'2006-03-01/">',
`<Name>${bucketName}</Name>`
);
const isTruncated = list.IsTruncated ? 'true' : 'false';
const xmlParams = [
{ tag: 'Prefix', value: listParams.prefix },
{ tag: 'NextMarker', value: list.NextMarker },
{ tag: 'Marker', value: listParams.marker },
{ tag: 'MaxKeys', value: listParams.maxKeys },
{ tag: 'Delimiter', value: listParams.delimiter },
{ tag: 'IsTruncated', value: isTruncated },
];
xmlParams.forEach(param => {
if (param.value) {
xml.push(`<${param.tag}>${param.value}</${param.tag}>`);
} else {
xml.push(`<${param.tag}/>`);
}
});
list.Contents.forEach(item => {
const v = item.value;
const objectKey = encoding === 'url' ?
querystring.escape(item.key) : escapeForXML(item.key);
xml.push(
'<Contents>',
`<Key>${objectKey}</Key>`,
`<LastModified>${v.LastModified}</LastModified>`,
`<ETag>&quot;${v.ETag}&quot;</ETag>`,
`<Size>${v.Size}</Size>`,
'<Owner>',
`<ID>${v.Owner.ID}</ID>`,
`<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
'</Owner>',
`<StorageClass>${v.StorageClass}</StorageClass>`,
'</Contents>'
);
});
list.CommonPrefixes.forEach(item => {
xml.push(
`<CommonPrefixes><Prefix>${item}</Prefix></CommonPrefixes>`
);
});
xml.push('</ListBucketResult>');
return callback(null, xml.join(''));
log.info('received list', list);
const xml = listParams.versions ?
formatVersionsXML(bucketName, listParams, encoding, list) :
formatXML(bucketName, listParams, encoding, list);
return callback(null, xml);
});
});
return undefined;

View File

@ -337,8 +337,12 @@ function completeMultipartUpload(authInfo, request, log, callback) {
dataLocations, metaStoreParams, mpuBucket,
mpuOverviewKey, aggregateETag,
storedPartsAsObjects, objMD, next) {
// TODO find a better way than this manual patching
const _metaStoreParams = metaStoreParams;
_metaStoreParams.versioning = destinationBucket.isVersioningOn();
_metaStoreParams.versionId = request.query.versionId;
services.metadataStoreObject(destinationBucket.getName(),
dataLocations, pseudoCipherBundle, metaStoreParams, err => {
dataLocations, pseudoCipherBundle, _metaStoreParams, err => {
if (err) {
return next(err);
}

View File

@ -3,7 +3,6 @@ import { errors } from 'arsenal';
import data from '../data/wrapper';
import kms from '../kms/wrapper';
import { logger } from '../utilities/logger';
import services from '../services';
import utils from '../utils';
import validateHeaders from '../utilities/validateHeaders';
@ -96,6 +95,7 @@ function objectCopy(authInfo, request, sourceBucket,
objectKey: sourceObject,
requestType: 'objectGet',
log,
// TODO copy a specific version of an object
};
const valPutParams = {
authInfo,
@ -180,11 +180,15 @@ function objectCopy(authInfo, request, sourceBucket,
destObjMD, next) {
const serverSideEncryption = destBucketMD.getServerSideEncryption();
// TODO find a better approach than manual patching
const _storeMetadataParams = storeMetadataParams;
_storeMetadataParams.versioning = destBucketMD.isVersioningOn();
// skip if source and dest the same or 0 byte object
// still send along serverSideEncryption info so algo
// and masterKeyId stored properly in metadata
if (sourceIsDestination || dataLocator.length === 0) {
return next(null, storeMetadataParams, dataLocator, destObjMD,
return next(null, _storeMetadataParams, dataLocator, destObjMD,
serverSideEncryption);
}
// dataLocator is an array. need to get and put all parts
@ -249,7 +253,7 @@ function objectCopy(authInfo, request, sourceBucket,
{ error: err });
return next(err);
}
return next(null, storeMetadataParams, results,
return next(null, _storeMetadataParams, results,
destObjMD, serverSideEncryption);
});
},
@ -266,6 +270,8 @@ function objectCopy(authInfo, request, sourceBucket,
// put is an overwrite of already existing
// object with same name
// so long as the source is not the same as the destination
// <versioning_and_replication>
/*
let dataToDelete;
if (destObjMD && destObjMD.location &&
!sourceIsDestination) {
@ -275,6 +281,8 @@ function objectCopy(authInfo, request, sourceBucket,
logger.newRequestLoggerFromSerializedUids(
log.getSerializedUids()));
}
*/
// </versioning_and_replication>
const sourceObjSize = storeMetadataParams.size;
const destObjPrevSize = destObjMD ?
destObjMD['content-length'] : null;

View File

@ -28,6 +28,7 @@ export default function objectDelete(authInfo, request, log, cb) {
objectKey,
requestType: 'objectDelete',
log,
query: request.query,
};
return services.metadataValidateAuthorization(valParams,
(err, bucket, objMD) => {
@ -50,12 +51,16 @@ export default function objectDelete(authInfo, request, log, cb) {
contentLength: objMD['content-length'],
});
}
const params = {
versioning: bucket.isVersioningOn(),
versionId: request.query ? request.query.versionId : undefined,
};
return services.deleteObject(bucketName, objMD, objectKey, log,
err => {
if (err) {
return cb(err);
}
return cb(err, objMD['content-length']);
});
}, params);
});
}

View File

@ -24,6 +24,7 @@ function objectGet(authInfo, request, log, callback) {
objectKey,
requestType: 'objectGet',
log,
query: request.query,
};
services.metadataValidateAuthorization(mdValParams, (err, bucket,

View File

@ -46,6 +46,7 @@ export default function objectGetACL(authInfo, request, log, callback) {
objectKey,
requestType: 'objectGetACL',
log,
query: request.query,
};
const grantInfo = {
grants: [],

View File

@ -24,6 +24,7 @@ export default function objectHead(authInfo, request, log, callback) {
objectKey,
requestType: 'objectHead',
log,
query: request.query,
};
return services.metadataValidateAuthorization(metadataValParams,

View File

@ -22,7 +22,7 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
});
}
function _storeIt(bucketName, objectKey, objMD, authInfo, canonicalID,
function _storeIt(bucketName, bucket, objectKey, objMD, authInfo, canonicalID,
cipherBundle, request, streamingV4Params, log, callback) {
const size = request.parsedContentLength;
const contentType = request.headers['content-type'];
@ -48,12 +48,17 @@ function _storeIt(bucketName, objectKey, objMD, authInfo, canonicalID,
contentType,
headers,
log,
versioning: bucket.isVersioningOn(),
versionId: request.query ? request.query.versionId : undefined,
};
let dataToDelete;
if (objMD && objMD.location) {
dataToDelete = Array.isArray(objMD.location) ?
objMD.location : [objMD.location];
}
// <versioning_and_replication>
// now deletion is the job of garbage collector
// if (objMD && objMD.location) {
// dataToDelete = Array.isArray(objMD.location) ?
// objMD.location : [objMD.location];
// }
// </versioning_and_replication>
// null - new object
// 0 or > 0 - existing object with content-length 0 or greater than 0
@ -148,6 +153,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
objectKey,
requestType: 'objectPut',
log,
query: request.query,
};
const canonicalID = authInfo.getCanonicalID();
log.trace('owner canonicalID to send to data', { canonicalID });
@ -193,13 +199,13 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
if (err) {
return callback(errors.InternalError);
}
return _storeIt(bucketName, objectKey,
objMD, authInfo, canonicalID,
cipherBundle, request,
return _storeIt(bucketName, bucket,
objectKey, objMD, authInfo,
canonicalID, cipherBundle, request,
streamingV4Params, log, callback);
});
}
return _storeIt(bucketName, objectKey, objMD,
return _storeIt(bucketName, bucket, objectKey, objMD,
authInfo, canonicalID, null, request,
streamingV4Params, log, callback);
});
@ -211,12 +217,12 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
if (err) {
return callback(errors.InternalError);
}
return _storeIt(bucketName, objectKey, objMD,
return _storeIt(bucketName, bucket, objectKey, objMD,
authInfo, canonicalID, cipherBundle,
request, streamingV4Params, log, callback);
});
}
return _storeIt(bucketName, objectKey, objMD, authInfo, canonicalID,
null, request, streamingV4Params, log, callback);
return _storeIt(bucketName, bucket, objectKey, objMD, authInfo,
canonicalID, null, request, streamingV4Params, log, callback);
});
}

View File

@ -61,6 +61,7 @@ export default function objectPutACL(authInfo, request, log, cb) {
objectKey,
requestType: 'objectPutACL',
log,
query: request.query,
};
const possibleGrants = ['FULL_CONTROL', 'WRITE_ACP', 'READ', 'READ_ACP'];
const addACLParams = {
@ -224,7 +225,14 @@ export default function objectPutACL(authInfo, request, log, cb) {
},
function waterfall4(bucket, objectMD, ACLParams, next) {
// Add acl's to object metadata
acl.addObjectACL(bucket, objectKey, objectMD, ACLParams, log, next);
const params = {
versioning: bucket.isVersioningOn(),
// TODO update both master and the specific versions using CAS
// now this will create another master version in the buckets
versionId: request.query ? request.query.versionId : undefined,
};
acl.addObjectACL(bucket, objectKey, objectMD, ACLParams, log, next,
params);
},
], err => {
if (err) {

View File

@ -1,4 +1,7 @@
import assert from 'assert';
import { versioning } from 'arsenal';
const VSC = versioning.VersioningConstants;
// WHEN UPDATING THIS NUMBER, UPDATE MODELVERSION.MD CHANGELOG
const modelVersion = 3;
@ -325,4 +328,13 @@ export default class BucketInfo {
hasDeletedFlag() {
return !!this._deleted;
}
/**
 * Check if the versioning mode is on.
 * TODO use a specific attribute for versioning mode; for now it is
 * inferred from the bucket name: any bucket whose name starts with one
 * of the special (internal) bucket prefixes in VSC.SpecialBuckets is
 * treated as non-versioned; every other bucket is versioned.
 * @return {boolean} - versioning mode status
 */
isVersioningOn() {
    return !VSC.SpecialBuckets.some(prefix =>
        this._name.startsWith(prefix));
}
}

View File

@ -12,11 +12,12 @@ const acl = {
metadata.updateBucket(bucket.getName(), bucket, log, cb);
},
addObjectACL(bucket, objectKey, objectMD, addACLParams, log, cb) {
addObjectACL(bucket, objectKey, objectMD, addACLParams, log, cb, params) {
log.trace('updating object acl in metadata');
// eslint-disable-next-line no-param-reassign
objectMD.acl = addACLParams;
metadata.putObjectMD(bucket.getName(), objectKey, objectMD, log, cb);
metadata.putObjectMD(bucket.getName(), objectKey, objectMD, log, cb,
params);
},
parseAclFromHeaders(params, cb) {

View File

@ -44,14 +44,14 @@ class BucketClientInterface {
});
}
getBucketAndObject(bucketName, objName, log, cb) {
getBucketAndObject(bucketName, objName, log, cb, params) {
this.client.getBucketAndObject(bucketName, objName,
log.getSerializedUids(), (err, data) => {
if (err && (!err.NoSuchKey && !err.ObjNotFound)) {
return cb(err);
}
return cb(null, JSON.parse(data));
});
}, params);
}
putBucketAttributes(bucketName, bucketMD, log, cb) {
@ -63,24 +63,24 @@ class BucketClientInterface {
this.client.deleteBucket(bucketName, log.getSerializedUids(), cb);
}
putObject(bucketName, objName, objVal, log, cb) {
putObject(bucketName, objName, objVal, log, cb, params) {
this.client.putObject(bucketName, objName, JSON.stringify(objVal),
log.getSerializedUids(), cb);
log.getSerializedUids(), cb, params);
}
getObject(bucketName, objName, log, cb) {
getObject(bucketName, objName, log, cb, params) {
this.client.getObject(bucketName, objName, log.getSerializedUids(),
(err, data) => {
if (err) {
return cb(err);
}
return cb(err, JSON.parse(data));
});
}, params);
}
deleteObject(bucketName, objName, log, cb) {
deleteObject(bucketName, objName, log, cb, params) {
this.client.deleteObject(bucketName, objName, log.getSerializedUids(),
cb);
cb, params);
}
listObject(bucketName, params, log, cb) {

View File

@ -67,7 +67,7 @@ const metadata = {
});
},
putObjectMD: (bucketName, objName, objVal, log, cb) => {
putObjectMD: (bucketName, objName, objVal, log, cb, params) => {
log.debug('putting object in metdata');
client.putObject(bucketName, objName, objVal, log, err => {
if (err) {
@ -76,10 +76,10 @@ const metadata = {
}
log.debug('object successfully put in metadata');
return cb(err);
});
}, params);
},
getBucketAndObjectMD: (bucketName, objName, log, cb) => {
getBucketAndObjectMD: (bucketName, objName, log, cb, params) => {
log.debug('getting bucket and object from metadata',
{ database: bucketName, object: objName });
client.getBucketAndObject(bucketName, objName, log, (err, data) => {
@ -90,10 +90,10 @@ const metadata = {
log.debug('bucket and object retrieved from metadata',
{ database: bucketName, object: objName });
return cb(err, data);
});
}, params);
},
getObjectMD: (bucketName, objName, log, cb) => {
getObjectMD: (bucketName, objName, log, cb, params) => {
log.debug('getting object from metadata');
client.getObject(bucketName, objName, log, (err, data) => {
if (err) {
@ -102,10 +102,10 @@ const metadata = {
}
log.debug('object retrieved from metadata');
return cb(err, data);
});
}, params);
},
deleteObjectMD: (bucketName, objName, log, cb) => {
deleteObjectMD: (bucketName, objName, log, cb, params) => {
log.debug('deleting object from metadata');
client.deleteObject(bucketName, objName, log, err => {
if (err) {
@ -114,21 +114,19 @@ const metadata = {
}
log.debug('object deleted from metadata');
return cb(err);
});
}, params);
},
listObject: (bucketName, prefix, marker, delimiter, maxKeys, log, cb) => {
client
.listObject(bucketName, { prefix, marker, maxKeys, delimiter },
log, (err, data) => {
log.debug('getting object listing from metadata');
if (err) {
log.warn('error from metadata', { implName, err });
return cb(err);
}
log.debug('object listing retrieved from metadata');
return cb(err, data);
});
// List objects of a bucket. The listing parameters (prefix, marker,
// delimiter, maxKeys, versioning flags, ...) are passed through to the
// metadata backend as a single params object.
listObject: (bucketName, params, log, cb) => {
    client.listObject(bucketName, params, log, (err, data) => {
        log.debug('getting object listing from metadata');
        if (err) {
            // surface backend errors to the caller unchanged
            log.warn('error from metadata', { implName, err });
            return cb(err);
        }
        log.debug('object listing retrieved from metadata');
        return cb(err, data);
    });
},
listMultipartUploads: (bucketName, listingParams, log, cb) => {

View File

@ -25,7 +25,7 @@ export default {
const bucketUsers = overrideUserbucket || usersBucket;
// Note that since maxKeys on a listObject request is 10,000,
// this request will retrieve up to 10,000 bucket names for a user.
metadata.listObject(bucketUsers, prefix, null, null, null, log,
metadata.listObject(bucketUsers, { prefix }, log,
(err, listResponse) => {
// If MD responds with NoSuchBucket, this means the
// hidden usersBucket has not yet been created for
@ -130,7 +130,7 @@ export default {
}
log.trace('found bucket and object in metadata');
return cb(null, bucket, obj);
});
}, params.query);
},
/**
@ -204,7 +204,7 @@ export default {
metadataStoreObject(bucketName, dataGetInfo, cipherBundle, params, cb) {
const { objectKey, authInfo, size, contentMD5, metaHeaders,
contentType, multipart, headers, overrideMetadata, log,
lastModifiedDate } = params;
lastModifiedDate, versioning, versionId } = params;
log.trace('storing object in metadata');
assert.strictEqual(typeof bucketName, 'string');
const omVal = {};
@ -247,7 +247,7 @@ export default {
// simple/no version. will expand once object versioning is introduced
omVal['x-amz-version-id'] = 'null';
omVal.acl = {
Canned: 'private',
Canned: 'authenticated-read',
FULL_CONTROL: [],
WRITE_ACP: [],
READ: [],
@ -296,7 +296,7 @@ export default {
}
log.trace('object successfully stored in metadata');
return cb(err, contentMD5);
});
}, { versioning, versionId });
return undefined;
});
} else {
@ -307,7 +307,7 @@ export default {
}
log.trace('object successfully stored in metadata');
return cb(err, contentMD5);
});
}, { versioning, versionId });
}
},
@ -318,12 +318,17 @@ export default {
* @param {string} objectKey - object key name
* @param {Log} log - logger instance
* @param {function} cb - callback from async.waterfall in objectGet
* @param {object} params - additional versioning information
* @return {undefined}
*/
deleteObject(bucketName, objectMD, objectKey, log, cb) {
deleteObject(bucketName, objectMD, objectKey, log, cb, params) {
log.trace('deleting object from bucket');
assert.strictEqual(typeof bucketName, 'string');
assert.strictEqual(typeof objectMD, 'object');
metadata.deleteObjectMD(bucketName, objectKey, log, cb, params);
// <versioning_and_replication>
// now deletion is handled by garbage collector
/*
if (objectMD['x-amz-version-id'] === 'null') {
log.trace('object identified as non-versioned');
// non-versioned buckets
@ -368,6 +373,8 @@ export default {
log.warn('deleteObject: versioning not fully implemented');
return metadata.deleteObjectMD(bucketName, objectKey, log, cb);
}
*/
// </versioning_and_replication>
return undefined;
},
@ -383,11 +390,10 @@ export default {
*/
getObjectListing(bucketName, listingParams, log, cb) {
assert.strictEqual(typeof bucketName, 'string');
const { delimiter, marker, prefix } = listingParams;
const maxKeys = Number(listingParams.maxKeys);
log.trace('performing metadata get object listing',
{ listingParams, maxKeys });
metadata.listObject(bucketName, prefix, marker, delimiter, maxKeys, log,
metadata.listObject(bucketName, listingParams, log,
(err, listResponse) => {
if (err) {
log.warn('error from metadata', { error: err });
@ -461,7 +467,7 @@ export default {
headers: params.headers,
resourceType: 'object',
acl: {
Canned: 'private',
Canned: 'authenticated-read',
FULL_CONTROL: [],
WRITE_ACP: [],
READ: [],
@ -521,8 +527,7 @@ export default {
delimiter: undefined,
maxKeys: 1,
};
metadata.listObject(mpuBucketName, searchArgs.prefix,
searchArgs.marker, searchArgs.delimiter, searchArgs.maxKeys,
metadata.listObject(mpuBucketName, searchArgs,
log, (err, response) => {
if (err) {
return cb(err);
@ -739,8 +744,7 @@ export default {
delimiter: undefined,
maxKeys: 10000,
};
metadata.listObject(mpuBucketName, searchArgs.prefix, searchArgs.marker,
searchArgs.delimiter, searchArgs.maxKeys, log, cb);
metadata.listObject(mpuBucketName, searchArgs, log, cb);
},
getSomeMPUparts(params, cb) {
@ -754,8 +758,7 @@ export default {
delimiter: undefined,
maxKeys: maxParts,
};
metadata.listObject(mpuBucketName, searchArgs.prefix, searchArgs.marker,
searchArgs.delimiter, searchArgs.maxKeys, log, cb);
metadata.listObject(mpuBucketName, searchArgs, log, cb);
},
batchDeleteObjectMetadata(mpuBucketName, keysToDelete, log, cb) {

View File

@ -49,8 +49,8 @@ describe('bucketDelete API', () => {
metadata.getBucket(bucketName, log, (err, md) => {
assert.strictEqual(md.getName(), bucketName);
metadata.listObject(usersBucket,
authInfo.getCanonicalID(),
null, null, null, log, (err, listResponse) => {
{ prefix: authInfo.getCanonicalID() },
log, (err, listResponse) => {
assert.strictEqual(listResponse.Contents.length,
1);
done();
@ -67,8 +67,8 @@ describe('bucketDelete API', () => {
metadata.getBucket(bucketName, log, (err, md) => {
assert.deepStrictEqual(err, errors.NoSuchBucket);
assert.strictEqual(md, undefined);
metadata.listObject(usersBucket, canonicalID,
null, null, null, log, (err, listResponse) => {
metadata.listObject(usersBucket, { prefix: canonicalID },
log, (err, listResponse) => {
assert.strictEqual(listResponse.Contents.length, 0);
done();
});

View File

@ -48,8 +48,8 @@ describe('bucketPut API', () => {
assert.strictEqual(md.getName(), bucketName);
assert.strictEqual(md.getOwner(), canonicalID);
const prefix = `${canonicalID}${splitter}`;
metadata.listObject(usersBucket, prefix,
null, null, null, log, (err, listResponse) => {
metadata.listObject(usersBucket, { prefix },
log, (err, listResponse) => {
assert.strictEqual(listResponse.Contents[0].key,
`${canonicalID}${splitter}${bucketName}`);
done();

View File

@ -284,8 +284,8 @@ describe('deleted flag bucket handling', () => {
assert.strictEqual(data._owner, authInfo.getCanonicalID());
metadata.listObject(`${constants.mpuBucketPrefix}` +
`${bucketName}`,
`overview${constants.splitter}${objName}`,
null, null, null, log, (err, results) => {
{ prefix: `overview${constants.splitter}${objName}` },
log, (err, results) => {
assert.ifError(err);
assert.strictEqual(results.Contents.length, 1);
done();

View File

@ -203,6 +203,9 @@ describe('objectPut API', () => {
});
});
// <versioning_and_replication>
// data deletion is now the job of garbage collector
/*
it('should not leave orphans in data when overwriting an object', done => {
const testPutObjectRequest2 = new DummyRequest({
bucketName,
@ -234,4 +237,6 @@ describe('objectPut API', () => {
});
});
});
*/
// </versioning_and_replication>
});

View File

@ -214,8 +214,8 @@ describe('transient bucket handling', () => {
assert.strictEqual(data._owner, authInfo.getCanonicalID());
metadata.listObject(`${constants.mpuBucketPrefix}` +
`${bucketName}`,
`overview${constants.splitter}${objName}`,
null, null, null, log, (err, results) => {
{ prefix: `overview${constants.splitter}${objName}` },
log, (err, results) => {
assert.ifError(err);
assert.strictEqual(results.Contents.length, 1);
done();

View File

@ -117,8 +117,8 @@ describe('bucket API for getting a subset of objects from a bucket', () => {
next =>
metadata.putObjectMD(bucketName, 'key1/', '{}', log, next),
next =>
metadata.listObject(bucketName, 'key', null, delimiter,
defaultLimit, log, next),
metadata.listObject(bucketName, { prefix: 'key', delimiter,
maxKeys: defaultLimit }, log, next),
], (err, response) => {
assert.strictEqual(isKeyInContents(response, 'key1'), true);
assert.strictEqual(response.CommonPrefixes.indexOf('key1'), -1);
@ -141,8 +141,8 @@ describe('bucket API for getting a subset of objects from a bucket', () => {
next =>
metadata.putObjectMD(bucketName, 'key/three', '{}', log, next),
next =>
metadata.listObject(bucketName, 'ke', null, delimiter,
defaultLimit, log, next),
metadata.listObject(bucketName, { prefix: 'ke', delimiter,
maxKeys: defaultLimit }, log, next),
], (err, response) => {
assert(response.CommonPrefixes.indexOf('key/') > -1);
assert.strictEqual(isKeyInContents(response, 'key/'), false);
@ -154,8 +154,8 @@ describe('bucket API for getting a subset of objects from a bucket', () => {
'given and keys match before delimiter', done => {
metadata.putObjectMD(bucketName, 'noPrefix/one', '{}', log, () => {
metadata.putObjectMD(bucketName, 'noPrefix/two', '{}', log, () => {
metadata.listObject(bucketName, null, null, delimiter,
defaultLimit, log, (err, response) => {
metadata.listObject(bucketName, { delimiter,
maxKeys: defaultLimit }, log, (err, response) => {
assert(response.CommonPrefixes.indexOf('noPrefix/')
> -1);
assert.strictEqual(isKeyInContents(response,
@ -168,7 +168,8 @@ describe('bucket API for getting a subset of objects from a bucket', () => {
it('should return no grouped keys if no ' +
'delimiter specified in getBucketListObjects', done => {
metadata.listObject(bucketName, 'key', null, null, defaultLimit, log,
metadata.listObject(bucketName,
{ prefix: 'key', maxKeys: defaultLimit }, log,
(err, response) => {
assert.strictEqual(response.CommonPrefixes.length, 0);
done();
@ -179,7 +180,8 @@ describe('bucket API for getting a subset of objects from a bucket', () => {
'AFTER marker when no delimiter specified', done => {
metadata.putObjectMD(bucketName, 'a', '{}', log, () => {
metadata.putObjectMD(bucketName, 'b', '{}', log, () => {
metadata.listObject(bucketName, null, 'a', null, defaultLimit,
metadata.listObject(bucketName,
{ marker: 'a', maxKeys: defaultLimit },
log, (err, response) => {
assert(isKeyInContents(response, 'b'));
assert.strictEqual(isKeyInContents(response, 'a'),
@ -192,7 +194,8 @@ describe('bucket API for getting a subset of objects from a bucket', () => {
it('should only return keys occurring alphabetically AFTER ' +
'marker when delimiter specified', done => {
metadata.listObject(bucketName, null, 'a', delimiter, defaultLimit,
metadata.listObject(bucketName,
{ marker: 'a', delimiter, maxKeys: defaultLimit },
log, (err, response) => {
assert(isKeyInContents(response, 'b'));
assert.strictEqual(isKeyInContents(response, 'a'), false);
@ -202,7 +205,8 @@ describe('bucket API for getting a subset of objects from a bucket', () => {
it('should only return keys occurring alphabetically AFTER ' +
'marker when delimiter and prefix specified', done => {
metadata.listObject(bucketName, 'b', 'a', delimiter, defaultLimit,
metadata.listObject(bucketName,
{ prefix: 'b', marker: 'a', delimiter, maxKeys: defaultLimit },
log, (err, response) => {
assert(isKeyInContents(response, 'b'));
assert.strictEqual(isKeyInContents(response, 'a'), false);
@ -220,8 +224,9 @@ describe('bucket API for getting a subset of objects from a bucket', () => {
next =>
metadata.putObjectMD(bucketName, 'next1/', '{}', log, next),
next =>
metadata.listObject(bucketName, 'next', null, delimiter,
smallLimit, log, next),
metadata.listObject(bucketName,
{ prefix: 'next', delimiter, maxKeys: smallLimit },
log, next),
], (err, response) => {
assert(response.CommonPrefixes.indexOf('next/') > -1);
assert.strictEqual(response.CommonPrefixes.indexOf('next1/'), -1);
@ -302,7 +307,7 @@ describe('stress test for bucket API', function describe() {
assert.strictEqual(err, undefined);
done();
} else {
metadata.listObject(bucketName, null, null, delimiter, null,
metadata.listObject(bucketName, { delimiter },
log, (err, response) => {
// Stop timing and calculate millisecond time difference
const diff = timeDiff(startTime);
@ -320,7 +325,8 @@ describe('stress test for bucket API', function describe() {
it('should return all keys as Contents if delimiter ' +
'does not match and specify NextMarker', done => {
metadata.listObject(bucketName,
null, null, oddDelimiter, testLimit, log, (err, response) => {
{ delimiter: oddDelimiter, maxKeys: testLimit },
log, (err, response) => {
assert.strictEqual(response.CommonPrefixes.length, 0);
assert.strictEqual(response.Contents.length, testLimit);
assert.strictEqual(response.IsTruncated, true);
@ -331,7 +337,7 @@ describe('stress test for bucket API', function describe() {
it('should return only keys occurring ' +
'after specified marker', done => {
metadata.listObject(bucketName, null, testMarker, delimiter, null, log,
metadata.listObject(bucketName, { marker: testMarker, delimiter }, log,
(err, res) => {
assert.strictEqual(res.CommonPrefixes.length,
prefixes.length - 1);