Compare commits

...

8 Commits

Author SHA1 Message Date
Taylor McKinnon 924bfac875 fix typo 2020-08-15 10:09:58 -07:00
Taylor McKinnon 213ac2b11f fix objectCopy 2020-08-15 10:09:53 -07:00
Taylor McKinnon 6bfc81e3ab bf(S3C-3263): Fix objectDelta field values 2020-08-15 10:09:46 -07:00
Rahul Padigela 31db42f9da improvement: parameterize nodejs version for eve 2020-08-12 16:03:25 -07:00
Rahul Padigela 2103f88743 improvement: pin packages to hotfix branches 2020-08-12 15:28:02 -07:00
Dora Korpar 463da9bdf7 bf: S3C-3246 getObject pushMetric missing key
(cherry picked from commit a37eab03ce)
2020-08-11 12:22:15 -07:00
Dora Korpar 698c7ee43c ft: S3C-3132 update apis with new metric info
(cherry picked from commit 651f62ef61)
2020-08-07 16:09:40 -07:00
Dora Korpar 51026ce324 ft: S3C-3132 Support utapi pushmetric v2
(cherry picked from commit 1aefa45548)
2020-08-07 16:09:22 -07:00
24 changed files with 3791 additions and 144 deletions

View File

@ -4,6 +4,7 @@ FROM buildpack-deps:xenial-curl
# Install packages needed by the buildchain
#
ENV LANG C.UTF-8
ENV NODEJS_VERSION="10"
COPY ./s3_packages.list ./buildbot_worker_packages.list /tmp/
RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& echo "deb http://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list \
@ -11,7 +12,7 @@ RUN curl -sS http://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - \
&& cat /tmp/*packages.list | xargs apt-get install -y \
&& git clone https://github.com/tj/n.git \
&& make -C ./n \
&& n 10 \
&& n $NODEJS_VERSION \
&& pip install pip==9.0.1 \
&& rm -rf ./n \
&& rm -rf /var/lib/apt/lists/* \

View File

@ -439,6 +439,9 @@ function completeMultipartUpload(authInfo, request, log, callback) {
canonicalID: destinationBucket.getOwner(),
bucket: bucketName,
keys: [objectKey],
versionId: generatedVersionId,
numberOfObjects: !generatedVersionId && oldByteLength !== null ? 0 : 1,
location: destinationBucket.getLocationConstraint(),
});
return callback(null, xml, resHeaders);
});

View File

@ -124,7 +124,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
}
function _getMPUBucket(destinationBucket, log, corsHeaders,
uploadId, cipherBundle, callback) {
uploadId, cipherBundle, locConstraint, callback) {
const xmlParams = {
bucketName,
objectKey,
@ -160,6 +160,8 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
authInfo,
bucket: bucketName,
keys: [objectKey],
location: locConstraint,
numberOfObjects: 1,
});
return callback(null, xml, corsHeaders);
});
@ -245,14 +247,14 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
uploadId = UUID.v4().replace(/-/g, '');
}
return _getMPUBucket(destinationBucket, log, corsHeaders,
uploadId, cipherBundle, callback);
uploadId, cipherBundle, locConstraint, callback);
});
}
// Generate uniqueID without dashes so that routing not messed up
uploadId = UUID.v4().replace(/-/g, '');
return _getMPUBucket(destinationBucket, log, corsHeaders,
uploadId, cipherBundle, callback);
uploadId, cipherBundle, locConstraint, callback);
}
metadataValidateBucketAndObj(metadataValParams, log,

View File

@ -501,6 +501,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
keys: deletedKeys,
byteLength: Number.parseInt(totalContentLengthDeleted, 10),
numberOfObjects: numOfObjectsRemoved,
isDelete: true,
});
return callback(null, xml, corsHeaders);
});

View File

@ -50,6 +50,7 @@ function multipartDelete(authInfo, request, log, callback) {
bucket: bucketName,
keys: [objectKey],
byteLength: partSizeSum,
location,
});
}
return callback(null, corsHeaders);

View File

@ -516,6 +516,10 @@ function objectCopy(authInfo, request, sourceBucket,
additionalHeaders['x-amz-version-id'] =
versionIdUtils.encode(storingNewMdResult.versionId);
}
// Only pre-existing non-versioned objects get 0; all others use 1
const numberOfObjects = !isVersioned && destObjPrevSize !== null ? 0 : 1;
pushMetric('copyObject', log, {
authInfo,
canonicalID: destBucketMD.getOwner(),
@ -523,6 +527,9 @@ function objectCopy(authInfo, request, sourceBucket,
keys: [destObjectKey],
newByteLength: sourceObjSize,
oldByteLength: isVersioned ? null : destObjPrevSize,
location: storeMetadataParams.dataStoreName,
versionId: isVersioned ? storingNewMdResult.versionId : undefined,
numberOfObjects,
});
// Add expiration header if lifecycle enabled
return callback(null, xml, additionalHeaders);

View File

@ -164,8 +164,13 @@ function objectDelete(authInfo, request, log, cb) {
resHeaders['x-amz-version-id'] = result.versionId === 'null' ?
result.versionId : versionIdUtils.encode(result.versionId);
}
pushMetric('putDeleteMarkerObject', log, { authInfo,
bucket: bucketName, keys: [objectKey] });
pushMetric('putDeleteMarkerObject', log, {
authInfo,
bucket: bucketName,
keys: [objectKey],
versionId: result.versionId,
location: objectMD ? objectMD.dataStoreName : undefined,
});
} else {
log.end().addDefaultFields({
contentLength: objectMD['content-length'],
@ -176,7 +181,10 @@ function objectDelete(authInfo, request, log, cb) {
bucket: bucketName,
keys: [objectKey],
byteLength: Number.parseInt(objectMD['content-length'], 10),
numberOfObjects: 1 });
numberOfObjects: 1,
location: objectMD.dataStoreName,
isDelete: true,
});
}
return cb(err, resHeaders);
});

View File

@ -101,6 +101,8 @@ function objectDeleteTagging(authInfo, request, log, callback) {
authInfo,
bucket: bucketName,
keys: [objectKey],
versionId: objectMD ? objectMD.versionId : undefined,
location: objectMD ? objectMD.dataStoreName : undefined,
});
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =

View File

@ -197,8 +197,11 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
pushMetric('getObject', log, {
authInfo,
bucket: bucketName,
keys: [objectKey],
newByteLength:
Number.parseInt(responseMetaHeaders['Content-Length'], 10),
versionId: objMD.versionId,
location: objMD.dataStoreName,
});
return callback(null, dataLocator, responseMetaHeaders,
byteRange);

View File

@ -215,6 +215,9 @@ function objectGetACL(authInfo, request, log, callback) {
pushMetric('getObjectAcl', log, {
authInfo,
bucket: bucketName,
keys: [objectKey],
versionId: resVersionId,
location: bucket ? bucket.getLocationConstraint() : undefined,
});
resHeaders['x-amz-version-id'] = resVersionId;
return callback(null, xml, resHeaders);

View File

@ -92,6 +92,9 @@ function objectGetLegalHold(authInfo, request, log, callback) {
pushMetric('getObjectLegalHold', log, {
authInfo,
bucket: bucketName,
keys: [objectKey],
versionId: objectMD ? objectMD.versionId : undefined,
location: objectMD ? objectMD.dataStoreName : undefined,
});
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =

View File

@ -92,6 +92,9 @@ function objectGetRetention(authInfo, request, log, callback) {
pushMetric('getObjectRetention', log, {
authInfo,
bucket: bucketName,
keys: [objectKey],
versionId: objectMD ? objectMD.versionId : undefined,
location: objectMD ? objectMD.dataStoreName : undefined,
});
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =

View File

@ -83,6 +83,9 @@ function objectGetTagging(authInfo, request, log, callback) {
pushMetric('getObjectTagging', log, {
authInfo,
bucket: bucketName,
keys: [objectKey],
versionId: objectMD ? objectMD.versionId : undefined,
location: objectMD ? objectMD.dataStoreName : undefined,
});
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =

View File

@ -128,7 +128,13 @@ function objectHead(authInfo, request, log, callback) {
}
responseHeaders['content-length'] = partSize;
}
pushMetric('headObject', log, { authInfo, bucket: bucketName });
pushMetric('headObject', log, {
authInfo,
bucket: bucketName,
keys: [objectKey],
versionId: objMD ? objMD.versionId : undefined,
location: objMD ? objMD.dataStoreName : undefined,
});
return callback(null, responseHeaders);
});
}

View File

@ -130,6 +130,10 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
versionIdUtils.encode(storingResult.versionId);
}
}
// Only pre-existing non-versioned objects get 0; all others use 1
const numberOfObjects = !isVersionedObj && oldByteLength !== null ? 0 : 1;
// only the bucket owner's metrics should be updated, regardless of
// who the requester is
pushMetric('putObject', log, {
@ -139,6 +143,9 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
keys: [objectKey],
newByteLength,
oldByteLength: isVersionedObj ? null : oldByteLength,
versionId: isVersionedObj && storingResult ? storingResult.versionId : undefined,
location: bucket.getLocationConstraint(),
numberOfObjects,
});
return callback(null, responseHeaders);
});

View File

@ -300,6 +300,8 @@ function objectPutACL(authInfo, request, log, cb) {
authInfo,
bucket: bucketName,
keys: [objectKey],
versionId: objectMD ? objectMD.versionId : undefined,
location: objectMD ? objectMD.dataStoreName : undefined,
});
return cb(null, resHeaders);
});

View File

@ -363,6 +363,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
keys: [destObjectKey],
newByteLength: copyObjectSize,
oldByteLength: prevObjectSize,
location: destBucketMD.getLocationConstraint(),
});
return callback(null, xml, additionalHeaders);
});

View File

@ -104,6 +104,8 @@ function objectPutLegalHold(authInfo, request, log, callback) {
authInfo,
bucket: bucketName,
keys: [objectKey],
versionId: objectMD ? objectMD.versionId : undefined,
location: objectMD ? objectMD.dataStoreName : undefined,
});
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =

View File

@ -373,6 +373,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
keys: [objectKey],
newByteLength: size,
oldByteLength: prevObjectSize,
location: destinationBucket.getLocationConstraint(),
});
return cb(null, hexDigest, corsHeaders);
});

View File

@ -105,6 +105,8 @@ function objectPutRetention(authInfo, request, log, callback) {
authInfo,
bucket: bucketName,
keys: [objectKey],
versionId: objectMD ? objectMD.versionId : undefined,
location: objectMD ? objectMD.dataStoreName : undefined,
});
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =

View File

@ -107,6 +107,8 @@ function objectPutTagging(authInfo, request, log, callback) {
authInfo,
bucket: bucketName,
keys: [objectKey],
versionId: objectMD ? objectMD.versionId : undefined,
location: objectMD ? objectMD.dataStoreName : undefined,
});
const verCfg = bucket.getVersioningConfiguration();
additionalResHeaders['x-amz-version-id'] =

View File

@ -2,7 +2,7 @@ const http = require('http');
const https = require('https');
const commander = require('commander');
const { auth } = require('arsenal');
const { UtapiClient } = require('utapi');
const { UtapiClient, utapiVersion } = require('utapi');
const logger = require('../utilities/logger');
const _config = require('../Config').config;
// setup utapi client
@ -19,6 +19,23 @@ const bucketOwnerMetrics = [
'uploadPart',
];
/**
 * Resolve which account/user a metric should be attributed to.
 *
 * Actions that affect the `numberOfObjectsStored` or `storageUtilized`
 * metrics (listed in `bucketOwnerMetrics`) are always charged to the
 * bucket owner's account, even when the requester is a different account;
 * in that case no user-level id is reported.
 *
 * @param {AuthInfo} authInfo - authenticated requester info
 * @param {string} canonicalID - canonical id of the bucket owner
 * @param {string} action - api action being metered
 * @return {object} - `{ accountId, userId }` to report to utapi
 */
function evalAuthInfo(authInfo, canonicalID, action) {
    const requesterCanonicalId = authInfo.getCanonicalID();
    const chargeBucketOwner = bucketOwnerMetrics.includes(action) &&
        requesterCanonicalId !== canonicalID;
    if (chargeBucketOwner) {
        // Storage-impacting action by a non-owner: attribute to the
        // bucket owner's account only, with no user-level breakdown.
        return { accountId: canonicalID, userId: undefined };
    }
    const userId = authInfo.isRequesterAnIAMUser() ?
        authInfo.getShortid() : undefined;
    return { accountId: requesterCanonicalId, userId };
}
function _listMetrics(host,
port,
metric,
@ -201,13 +218,47 @@ function listMetrics(metricType) {
* @param {number} [metricObj.newByteLength] - (optional) new object size
* @param {number|null} [metricObj.oldByteLength] - (optional) old object size
* (obj. overwrites)
* @param {number} [metricObj.numberOfObjects] - (optional) number of obects
* @param {number} [metricObj.numberOfObjects] - (optional) number of objects
* added/deleted
* @param {boolean} [metricObj.isDelete] - (optional) Indicates whether this
* is a delete operation
* @return {function} - `utapi.pushMetric`
*/
function pushMetric(action, log, metricObj) {
const { bucket, keys, byteLength, newByteLength,
oldByteLength, numberOfObjects, authInfo, canonicalID } = metricObj;
const {
bucket,
keys,
versionId,
byteLength,
newByteLength,
oldByteLength,
numberOfObjects,
authInfo,
canonicalID,
location,
isDelete,
} = metricObj;
if (utapiVersion === 2) {
const utapiObj = {
operationId: action,
bucket,
location,
objectDelta: isDelete ? -numberOfObjects : numberOfObjects,
sizeDelta: oldByteLength ? newByteLength - oldByteLength : newByteLength,
incomingBytes: newByteLength,
outgoingBytes: action === 'getObject' ? newByteLength : 0,
};
if (keys && keys.length === 1) {
[utapiObj.object] = keys;
if (versionId) {
utapiObj.versionId = versionId;
}
}
utapiObj.account = authInfo ? evalAuthInfo(authInfo, canonicalID, action).accountId : canonicalID;
utapiObj.user = authInfo ? evalAuthInfo(authInfo, canonicalID, action).userId : '';
return utapi.pushMetric(utapiObj);
}
const utapiObj = {
bucket,
keys,
@ -220,16 +271,9 @@ function pushMetric(action, log, metricObj) {
// account-level metrics and the shortId for user-level metrics. Otherwise
// check if the canonical ID is already provided for account-level metrics.
if (authInfo) {
utapiObj.accountId = authInfo.getCanonicalID();
utapiObj.userId = authInfo.isRequesterAnIAMUser() ?
authInfo.getShortid() : undefined;
// If action impacts 'numberOfObjectsStored' or 'storageUtilized' metric
// only the bucket owner account's metrics should be updated
const canonicalIdMatch = authInfo.getCanonicalID() === canonicalID;
if (bucketOwnerMetrics.includes(action) && !canonicalIdMatch) {
utapiObj.accountId = canonicalID;
utapiObj.userId = undefined;
}
const { accountId, userId } = evalAuthInfo(authInfo, canonicalID, action);
utapiObj.accountId = accountId;
utapiObj.userId = userId;
} else if (canonicalID) {
utapiObj.accountId = canonicalID;
}

View File

@ -19,11 +19,11 @@
},
"homepage": "https://github.com/scality/S3#readme",
"dependencies": {
"arsenal": "github:scality/Arsenal#e6622df",
"arsenal": "github:scality/Arsenal#hotfix/7.7.0",
"async": "~2.5.0",
"aws-sdk": "2.363.0",
"azure-storage": "^2.1.0",
"bucketclient": "scality/bucketclient#6d2d5a4",
"bucketclient": "scality/bucketclient#hotfix/7.7.0",
"commander": "^2.9.0",
"cron-parser": "^2.11.0",
"diskusage": "1.1.3",
@ -32,12 +32,12 @@
"moment": "^2.26.0",
"node-uuid": "^1.4.3",
"npm-run-all": "~4.1.5",
"sproxydclient": "scality/sproxydclient#44f025b",
"utapi": "scality/utapi#89ede12",
"sproxydclient": "scality/sproxydclient#hotfix/7.7.0",
"utapi": "scality/utapi#hotfix/7.7.0",
"utf8": "~2.1.1",
"uuid": "^3.0.1",
"vaultclient": "scality/vaultclient#21d03b1",
"werelogs": "scality/werelogs#0a4c576",
"vaultclient": "scality/vaultclient#hotfix/7.7.0",
"werelogs": "scality/werelogs#hotfix/7.7.0",
"xml2js": "~0.4.16"
},
"devDependencies": {

3770
yarn.lock

File diff suppressed because it is too large Load Diff