Compare commits

...

2 Commits

Author: Jonathan Gramain | SHA1: 3920d3c207 | Date: 2019-03-12 21:36:20 -07:00
feature: ZENKO-1420 return locations from backbeat PUT routes

    Return the locations array in the API of putObject and completeMPU
    multiple backend routes, so that transition policies can use it to
    update metadata with the new location as-is.

Author: bbuchanan9 | SHA1: 4879912bc8 | Date: 2019-03-12 15:42:16 -07:00
feature: ZENKO-1529 Add managed transition report
4 changed files with 851 additions and 944 deletions
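
For reference, the shape of the new response body can be read off constructPutResponse() in the first file's diff below. A hypothetical example of what the backbeat PUT routes now return for an object stored at an AWS location (all field values invented for illustration):

{
    "versionId": "98445230573829999999RG001",
    "location": [{
        "dataStoreName": "aws-us-east-1",
        "dataStoreType": "aws_s3",
        "key": "example-object-key",
        "start": 0,
        "size": 1048576,
        "dataStoreETag": "1:d41d8cd98f00b204e9800998ecf8427e",
        "dataStoreVersionId": "98445230573829999999RG001"
    }]
}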


@@ -5,9 +5,10 @@ const httpProxy = require('http-proxy');
 const backbeatProxy = httpProxy.createProxyServer({
     ignorePath: true,
 });
-const { auth, errors, s3middleware, models } = require('arsenal');
+const { auth, errors, s3middleware, models, storage } = require('arsenal');
 const { responseJSONBody } = require('arsenal').s3routes.routesUtils;
 const { getSubPartIds } = s3middleware.azureHelper.mpuUtils;
+const { skipMpuPartProcessing } = storage.data.external.backendUtils;
 const vault = require('../auth/vault');
 const { data } = require('../data/wrapper');
 const metadata = require('../metadata/wrapper');
@@ -183,9 +184,64 @@ function getPartList(parts, objectKey, uploadId, storageLocation) {
     } else {
         partList.Part = parts;
     }
     return partList;
 }
 
+function generateMpuAggregateInfo(parts) {
+    let aggregateSize;
+
+    // CopyLocationTask does transmit a size for each part,
+    // MultipleBackendTask does not, so check if size is defined in
+    // the first part.
+    if (parts[0] && parts[0].Size) {
+        aggregateSize = parts.reduce(
+            (agg, part) => agg + Number.parseInt(part.Size[0], 10), 0);
+    }
+    return {
+        aggregateSize,
+        aggregateETag: s3middleware.processMpuParts.createAggregateETag(
+            parts.map(part => part.ETag[0])),
+    };
+}
+
+/**
+ * Helper to create the response object for putObject and completeMPU
+ *
+ * @param {object} params - response info
+ * @param {string} params.dataStoreName - name of location
+ * @param {string} params.dataStoreType - location type (e.g. "aws_s3")
+ * @param {string} params.key - object key
+ * @param {number} params.size - total byte length
+ * @param {string} params.dataStoreETag - object ETag
+ * @param {string} [params.dataStoreVersionId] - object version ID, if
+ * versioned
+ * @return {object} - the response object to serialize and send back
+ */
+function constructPutResponse(params) {
+    // FIXME: The main data locations array may eventually resemble
+    // locations stored in replication info object, i.e. without
+    // size/start for cloud locations, which could ease passing
+    // standard location objects across services. For now let's just
+    // create the location as they are usually stored in the
+    // "locations" attribute, with size/start info.
+    const location = [{
+        dataStoreName: params.dataStoreName,
+        dataStoreType: params.dataStoreType,
+        key: params.key,
+        start: 0,
+        size: params.size,
+        dataStoreETag: params.dataStoreETag,
+        dataStoreVersionId: params.dataStoreVersionId,
+    }];
+    return {
+        // TODO: Remove '' when versioning implemented for Azure.
+        versionId: params.dataStoreVersionId || '',
+        location,
+    };
+}
+
 function handleTaggingOperation(request, response, type, dataStoreVersionId,
     log, callback) {
     const storageLocation = request.headers['x-scal-storage-class'];
@@ -401,11 +457,16 @@ function putObject(request, response, log, callback) {
         if (contentMD5 !== md5) {
             return callback(errors.BadDigest);
         }
-        const dataRetrievalInfo = {
-            // TODO: Remove '' when versioning implemented for Azure.
-            versionId: retrievalInfo.dataStoreVersionId || '',
-        };
-        return _respond(response, dataRetrievalInfo, log, callback);
+        const responsePayload = constructPutResponse({
+            dataStoreName: retrievalInfo.dataStoreName,
+            dataStoreType: retrievalInfo.dataStoreType,
+            key: retrievalInfo.key,
+            size: payloadLen,
+            dataStoreETag: retrievalInfo.dataStoreETag ?
+                `1:${retrievalInfo.dataStoreETag}` : `1:${md5}`,
+            dataStoreVersionId: retrievalInfo.dataStoreVersionId,
+        });
+        return _respond(response, responsePayload, log, callback);
     });
 }
 
@@ -577,8 +638,8 @@ function completeMultipartUpload(request, response, log, callback) {
             // FIXME: add error type MalformedJSON
             return callback(errors.MalformedPOSTRequest);
         }
-        const partList =
-            getPartList(parts, request.objectKey, uploadId, storageLocation);
+        const partList = getPartList(
+            parts, request.objectKey, uploadId, storageLocation);
         // Azure client will set user metadata at this point.
         const metaHeaders = { 'x-amz-meta-scal-replication-status': 'REPLICA' };
         if (sourceVersionId) {
@@ -615,11 +676,31 @@ function completeMultipartUpload(request, response, log, callback) {
                 });
                 return callback(err);
             }
-            const dataRetrievalInfo = {
-                // TODO: Remove '' when versioning implemented for Azure.
-                versionId: retrievalInfo.dataStoreVersionId || '',
-            };
-            return _respond(response, dataRetrievalInfo, log, callback);
+            // The logic here is an aggregate of code coming from
+            // lib/api/completeMultipartUpload.js.
+
+            const { key, dataStoreType, dataStoreVersionId } =
+                retrievalInfo;
+            let size;
+            let dataStoreETag;
+            if (skipMpuPartProcessing(retrievalInfo)) {
+                size = retrievalInfo.contentLength;
+                dataStoreETag = retrievalInfo.eTag;
+            } else {
+                const { aggregateSize, aggregateETag } =
+                    generateMpuAggregateInfo(parts);
+                size = aggregateSize;
+                dataStoreETag = aggregateETag;
+            }
+            const responsePayload = constructPutResponse({
+                dataStoreName: storageLocation,
+                dataStoreType,
+                key,
+                size,
+                dataStoreETag,
+                dataStoreVersionId,
+            });
+            return _respond(response, responsePayload, log, callback);
         });
     });
     return undefined;
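
As the ZENKO-1420 commit message says, the point of returning the `location` array is that a transition policy processor can write it into object metadata unchanged. A minimal consumer-side sketch (hypothetical function name; assumes arsenal's ObjectMD model and its setLocation/getValue accessors):

const { models } = require('arsenal');

// Sketch: store the location array returned by a backbeat PUT route
// into an object metadata blob, as a transition task might do.
function applyNewLocation(responsePayload) {
    const objectMD = new models.ObjectMD();
    // Entries in responsePayload.location already use the format of the
    // metadata "location" attribute (dataStoreName, dataStoreType, key,
    // start, size, dataStoreETag, dataStoreVersionId), so they can be
    // stored as-is.
    objectMD.setLocation(responsePayload.location);
    return objectMD.getValue();
}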


@@ -38,6 +38,7 @@ function getCapabilities() {
         locationTypeCephRadosGW: true,
         preferredReadLocation: true,
         managedLifecycle: true,
+        managedLifecycleTransition: true,
         secureChannelOptimizedPath: hasWSOptionalDependencies(),
         s3cIngestLocation: true,
     };
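
A short sketch of how a management client might use the new flag (hypothetical consumer code; getCapabilities() is the function shown above):

// Gate transition-policy features on the reported capability.
const capabilities = getCapabilities();
if (capabilities.managedLifecycleTransition) {
    // This deployment supports managed lifecycle transition policies.
}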

package-lock.json (generated): file diff suppressed because it is too large.


@@ -19,7 +19,7 @@
   },
   "homepage": "https://github.com/scality/S3#readme",
   "dependencies": {
-    "arsenal": "github:scality/arsenal#f199d52",
+    "arsenal": "github:scality/arsenal#6a04e6c",
     "async": "~2.5.0",
     "aws-sdk": "2.28.0",
     "azure-storage": "^2.1.0",