Compare commits
37 Commits
developmen ... feature/CL
Author | SHA1 | Date |
---|---|---|
williamlardier | d85efe8f79 | |
williamlardier | a45cb8340f | |
williamlardier | 156f971970 | |
williamlardier | 2228657bbe | |
williamlardier | 0402466a06 | |
williamlardier | 9dac38ef57 | |
williamlardier | f31e4a4b08 | |
williamlardier | 98edc0c696 | |
williamlardier | 21c9055fa1 | |
williamlardier | 4429f37366 | |
williamlardier | 9578819cac | |
williamlardier | f5bcceda2c | |
williamlardier | 98d06e4b1b | |
williamlardier | 6a6a7763d2 | |
williamlardier | 0b96e4ef4d | |
williamlardier | 04e39940c0 | |
Maha Benzekri | a9e65ef91e | |
Maha Benzekri | 4dd2b06e10 | |
Maha Benzekri | d2eafe4aa6 | |
Maha Benzekri | be486d3303 | |
Maha Benzekri | c8ade032c6 | |
Maha Benzekri | b45c80fa18 | |
Maha Benzekri | 8fb0569b5e | |
Maha Benzekri | 28e697b95f | |
Maha Benzekri | 9a4873379e | |
Maha Benzekri | 9df036137d | |
Maha Benzekri | 208bb0d3fb | |
Maha Benzekri | 8b3bb32e8a | |
Maha Benzekri | 7e3130c071 | |
Maha Benzekri | fd9140e1d1 | |
Maha Benzekri | 026cf9d4d1 | |
Maha Benzekri | f36becbc25 | |
Maha Benzekri | b6bea08b90 | |
Maha Benzekri | 837cdb2705 | |
Maha Benzekri | 6c5b5a0bf5 | |
Maha Benzekri | 1ca4ffadd4 | |
Maha Benzekri | f906076a0e | |
@@ -40,6 +40,9 @@ services:
       - DEFAULT_BUCKET_KEY_FORMAT
       - METADATA_MAX_CACHED_BUCKETS
       - ENABLE_NULL_VERSION_COMPAT_MODE
+      - SCUBA_HOST
+      - SCUBA_PORT
+      - SCUBA_HEALTHCHECK_FREQUENCY
     env_file:
       - creds.env
     depends_on:
@@ -363,6 +363,40 @@ jobs:
           source: /tmp/artifacts
         if: always()

+  scuba-tests:
+    runs-on: ubuntu-latest
+    needs: build
+    env:
+      S3BACKEND: mem
+      SCUBA_HOST: localhost
+      SCUBA_PORT: 8100
+      SCUBA_HEALTHCHECK_FREQUENCY: 100
+      CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
+      MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
+      JOB_NAME: ${{ github.job }}
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v4
+      - name: Setup CI environment
+        uses: ./.github/actions/setup-ci
+      - name: Setup CI services
+        run: docker compose up -d
+        working-directory: .github/docker
+      - name: Run scuba tests
+        run: |-
+          set -ex -o pipefail;
+          bash wait_for_local_port.bash 8000 40
+          yarn run test_scuba | tee /tmp/artifacts/${{ github.job }}/tests.log
+      - name: Upload logs to artifacts
+        uses: scality/action-artifacts@v3
+        with:
+          method: upload
+          url: https://artifacts.scality.net
+          user: ${{ secrets.ARTIFACTS_USER }}
+          password: ${{ secrets.ARTIFACTS_PASSWORD }}
+          source: /tmp/artifacts
+        if: always()
+
   kmip-ft-tests:
     runs-on: ubuntu-latest
     needs: build
@@ -23,6 +23,7 @@ RUN apt-get update \

 ENV PYTHON=python3
 COPY package.json yarn.lock /usr/src/app/
+RUN npm install typescript -g
 RUN yarn install --production --ignore-optional --frozen-lockfile --ignore-engines --network-concurrency 1

 ################################################################################
@@ -243,6 +243,9 @@ const constants = {
        'objectPutPart',
        'completeMultipartUpload',
    ],
+    // if requester is not bucket owner, bucket policy actions should be denied with
+    // MethodNotAllowed error
+    onlyOwnerAllowed: ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'],
 };

 module.exports = constants;
@@ -1117,6 +1117,30 @@ class Config extends EventEmitter {
                this.redis.password = config.redis.password;
            }
        }
+        if (config.scuba) {
+            if (config.scuba.host) {
+                assert(typeof config.scuba.host === 'string',
+                    'bad config: scuba host must be a string');
+                this.scuba = { host: config.scuba.host };
+            }
+            if (config.scuba.port) {
+                assert(Number.isInteger(config.scuba.port)
+                    && config.scuba.port > 0,
+                    'bad config: scuba port must be a positive integer');
+                this.scuba.port = config.scuba.port;
+            }
+        }
+        if (process.env.SCUBA_HOST && process.env.SCUBA_PORT) {
+            assert(typeof process.env.SCUBA_HOST === 'string',
+                'bad config: scuba host must be a string');
+            assert(Number.isInteger(Number(process.env.SCUBA_PORT))
+                && Number(process.env.SCUBA_PORT) > 0,
+                'bad config: scuba port must be a positive integer');
+            this.scuba = {
+                host: process.env.SCUBA_HOST,
+                port: Number(process.env.SCUBA_PORT),
+            };
+        }
        if (config.utapi) {
            this.utapi = { component: 's3' };
            if (config.utapi.host) {
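For orientation, a minimal sketch of what the parsing above accepts; the host and port values are placeholders taken from the CI job added earlier in this PR, not values mandated by it:

```js
// Hypothetical excerpt of a CloudServer config, as accepted by the parsing above.
const exampleConfig = {
    scuba: {
        host: 'localhost', // placeholder
        port: 8100,        // placeholder
    },
};
// The same settings may instead come from the environment, as in the CI job:
//   SCUBA_HOST=localhost SCUBA_PORT=8100 SCUBA_HEALTHCHECK_FREQUENCY=100
```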
@@ -7,6 +7,7 @@ const bucketDeleteEncryption = require('./bucketDeleteEncryption');
 const bucketDeleteWebsite = require('./bucketDeleteWebsite');
 const bucketDeleteLifecycle = require('./bucketDeleteLifecycle');
 const bucketDeletePolicy = require('./bucketDeletePolicy');
+const bucketDeleteQuota = require('./bucketDeleteQuota');
 const { bucketGet } = require('./bucketGet');
 const bucketGetACL = require('./bucketGetACL');
 const bucketGetCors = require('./bucketGetCors');
@@ -17,6 +18,7 @@ const bucketGetLifecycle = require('./bucketGetLifecycle');
 const bucketGetNotification = require('./bucketGetNotification');
 const bucketGetObjectLock = require('./bucketGetObjectLock');
 const bucketGetPolicy = require('./bucketGetPolicy');
+const bucketGetQuota = require('./bucketGetQuota');
 const bucketGetEncryption = require('./bucketGetEncryption');
 const bucketHead = require('./bucketHead');
 const { bucketPut } = require('./bucketPut');
@@ -33,6 +35,7 @@ const bucketPutNotification = require('./bucketPutNotification');
 const bucketPutEncryption = require('./bucketPutEncryption');
 const bucketPutPolicy = require('./bucketPutPolicy');
 const bucketPutObjectLock = require('./bucketPutObjectLock');
+const bucketUpdateQuota = require('./bucketUpdateQuota');
 const bucketGetReplication = require('./bucketGetReplication');
 const bucketDeleteReplication = require('./bucketDeleteReplication');
 const corsPreflight = require('./corsPreflight');
@@ -148,6 +151,7 @@ const api = {
    function checkAuthResults(authResults) {
        let returnTagCount = true;
        const isImplicitDeny = {};
+        let accountQuotas = {};
        let isOnlyImplicitDeny = true;
        if (apiMethod === 'objectGet') {
            // first item checks s3:GetObject(Version) action
@@ -180,12 +184,13 @@ const api = {
                }
            }
        }
+        accountQuotas = authResults?.[0]?.accountQuota;
        // These two APIs cannot use ACLs or Bucket Policies, hence, any
        // implicit deny from vault must be treated as an explicit deny.
        if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
            return errors.AccessDenied;
        }
-        return { returnTagCount, isImplicitDeny };
+        return { returnTagCount, isImplicitDeny, accountQuotas };
    }

    return async.waterfall([
@@ -268,6 +273,7 @@ const api = {
                }
                returnTagCount = checkedResults.returnTagCount;
                request.actionImplicitDenies = checkedResults.isImplicitDeny;
+                request.accountQuotas = checkedResults.accountQuotas;
            } else {
                // create an object of keys apiMethods with all values to false:
                // for backward compatibility, all apiMethods are allowed by default
@@ -276,6 +282,10 @@ const api = {
                    acc[curr] = false;
                    return acc;
                }, {});
+                request.accountQuotas = apiMethods.reduce((acc, curr) => {
+                    acc[curr] = undefined;
+                    return acc;
+                }, {});
            }
            if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
                request._response = response;
@@ -316,11 +326,14 @@ const api = {
    bucketPutReplication,
    bucketGetReplication,
    bucketDeleteReplication,
+    bucketDeleteQuota,
    bucketPutLifecycle,
+    bucketUpdateQuota,
    bucketGetLifecycle,
    bucketDeleteLifecycle,
    bucketPutPolicy,
    bucketGetPolicy,
+    bucketGetQuota,
    bucketDeletePolicy,
    bucketPutObjectLock,
    bucketPutNotification,
@@ -1,8 +1,9 @@
-const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies;
+const { evaluators, actionMaps, actionNeedQuotaCheck, RequestContext, requestUtils } = require('arsenal').policies;
 const { errors } = require('arsenal');
 const { parseCIDR, isValid } = require('ipaddr.js');
 const constants = require('../../../../constants');
 const { config } = require('../../../Config');
+const { ScubaClientInstance } = require('../../../scuba/wrapper');

 const {
     allAuthedUsersId,
@@ -372,6 +373,124 @@ function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner,
    return processedResult;
 }

+/**
+ *
+ * @param {BucketInfo} bucket - bucket object
+ * @param {Account} account - account object
+ * @param {array} apiNames - action names: operations to authorize
+ * @param {string} apiMethod - the main API call
+ * @param {number} inflight - inflight bytes
+ * @param {Logger} log - logger
+ * @param {function} callback - callback
+ * @returns {boolean} - true if the quota is valid, false otherwise
+ */
+async function validateQuotas(bucket, account, apiNames, apiMethod, inflight, log, callback) {
+    console.log('evaluate quota with', bucket, account, apiNames, apiMethod, inflight)
+    const bucketQuota = bucket.getQuota();
+    const accountQuota = account?.quota || 0;
+    let bucketQuotaExceeded = false;
+    let accountQuotaExceeded = false;
+
+    if ((bucketQuota <= 0 && accountQuota <= 0) || !ScubaClientInstance?.enabled) {
+        if (bucketQuota > 0 || accountQuota > 0) {
+            log.warn('quota is set for a bucket, but scuba is disabled', {
+                bucketName: bucket.getName(),
+            });
+        }
+        return callback();
+    }
+    const creationDate = new Date(bucket.getCreationDate()).getTime();
+
+    try {
+        // A potential optimiation, if inflights are disabled, is to only evaluate
+        // the lowest quota.
+        // eslint-disable-next-line no-restricted-syntax
+        for (const apiName of apiNames) {
+            let shouldEvaluateCopyObject = false;
+            if (apiName === 'objectGet' && (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart')) {
+                shouldEvaluateCopyObject = true;
+                // eslint-disable-next-line no-param-reassign
+                inflight = Math.abs(inflight);
+            }
+            if (!shouldEvaluateCopyObject && !actionNeedQuotaCheck[apiName]) {
+                continue;
+            }
+            // eslint-disable-next-line no-await-in-loop
+            const bucketMetrics = await ScubaClientInstance.getLatestMetrics('bucket',
+                `${bucket.getName()}_${creationDate}`, null, {
+                    action: apiName,
+                    inflight,
+                });
+
+            if (bucketMetrics.bytesTotal > bucketQuota) {
+                log.debug('Bucket quota exceeded', {
+                    bucket: bucket.getName(),
+                    action: apiName,
+                    inflight,
+                    quota: bucketQuota,
+                    bytesTotal: bucketMetrics.bytesTotal,
+                });
+                bucketQuotaExceeded = true;
+            }
+
+            if (accountQuota > 0 && account?.account) {
+                // eslint-disable-next-line no-await-in-loop
+                const accountMetrics = await ScubaClientInstance.getLatestMetrics('account',
+                    `${account.account}_${creationDate}`, null, {
+                        action: apiName,
+                        inflight,
+                    });
+
+                if (accountMetrics.bytesTotal > account.quota) {
+                    log.debug('Account quota exceeded', {
+                        accountId: account.account,
+                        action: apiName,
+                        inflight,
+                        quota: account.quota,
+                        bytesTotal: accountMetrics.bytesTotal,
+                    });
+                    accountQuotaExceeded = true;
+                }
+            }
+        }
+
+        if (bucketQuotaExceeded || accountQuotaExceeded) {
+            if (apiMethod?.endsWith('Delete')) {
+                return callback();
+            }
+            // clean any inflight bytes
+            if (inflight > 0) {
+                // eslint-disable-next-line no-await-in-loop
+                await ScubaClientInstance.getLatestMetrics('bucket',
+                    `${bucket.getName()}_${creationDate}`, null, {
+                        action: apiMethod,
+                        inflight: -inflight,
+                    });
+                if (account?.quota) {
+                    // eslint-disable-next-line no-await-in-loop
+                    await ScubaClientInstance.getLatestMetrics('account',
+                        `${account.account}_${creationDate}`, null, {
+                            action: apiMethod,
+                            inflight: -inflight,
+                        });
+                }
+            }
+            return callback(errors.QuotaExceeded);
+        }
+
+        return callback();
+    } catch (err) {
+        log.warn('Error getting metrics from scuba, allowing the request', {
+            error: err.name,
+            description: err.message,
+        });
+        if (bucketQuotaExceeded || accountQuotaExceeded) {
+            return callback(errors.QuotaExceeded);
+        }
+        return callback();
+    }
+}
+
 function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
    actionImplicitDeniesInput = {}, isWebsite = false) {
    const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
@@ -626,6 +745,7 @@ function isLifecycleSession(arn) {
 }

 module.exports = {
+    validateQuotas,
    isBucketAuthorized,
    isObjAuthorized,
    getServiceAccountProperties,
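For readers skimming the diff: later hunks (objectPutPart, multiObjectDelete, metadataUtils) call `validateQuotas` as an extra `async.waterfall` step. A minimal sketch of that call shape, with placeholder names for everything not defined in this file:

```js
// Sketch only: a quota-check step as wired into async.waterfall flows later in
// this PR. `validateQuotas` is the function added above; `request`, `size` and
// `log` are assumed to be in scope in the calling API handler.
function quotaCheckStep(request, size, log) {
    return (destinationBucket, next) => validateQuotas(
        destinationBucket,                  // BucketInfo exposing getQuota()/getCreationDate()
        request.accountQuotas,              // account quota data attached in api.js
        request.apiMethods || 'objectPut',  // action name(s) to evaluate
        request.apiMethod,                  // the top-level API call
        size,                               // inflight bytes for this request
        log,
        err => next(err, destinationBucket));
}
```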
@@ -59,6 +59,7 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
        objectKey,
        versionId: decodedVidResult,
        requestType: request.apiMethods || 'restoreObject',
+        request,
    };

    return async.waterfall([
@@ -0,0 +1,57 @@
const { waterfall } = require('async');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');

/**
 * Bucket Update Quota - Update bucket quota
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
 * @param {object} request - http request object
 * @param {object} log - Werelogs logger
 * @param {function} callback - callback to server
 * @return {undefined}
 */
function bucketDeleteQuota(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketDeleteQuota' });

    const { bucketName } = request;
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketDeleteQuota',
        request,
    };
    return waterfall([
        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, bucket) => next(err, bucket)),
        (bucket, next) => {
            bucket.setQuota(0);
            metadata.updateBucket(bucket.getName(), bucket, log, err =>
                next(err, bucket));
        },
    ], (err, bucket) => {
        const corsHeaders = collectCorsHeaders(request.headers.origin,
            request.method, bucket);
        if (err) {
            log.debug('error processing request', {
                error: err,
                method: 'bucketDeleteQuota'
            });
            monitoring.promMetrics('DELETE', bucketName, err.code,
                'bucketDeleteQuota');
            return callback(err, err.code, corsHeaders);
        } else {
            monitoring.promMetrics(
                'DELETE', bucketName, '204', 'bucketDeleteQuota');
            pushMetric('bucketDeleteQuota', log, {
                authInfo,
                bucket: bucketName,
            });
        }
        return callback(null, 204, corsHeaders);
    });
}

module.exports = bucketDeleteQuota;
@@ -0,0 +1,58 @@
const { errors } = require('arsenal');
const { pushMetric } = require('../utapi/utilities');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');

/**
 * bucketGetQuota - Get the bucket quota
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
 * @param {object} request - http request object
 * @param {object} log - Werelogs logger
 * @param {function} callback - callback to server
 * @return {undefined}
 */
function bucketGetQuota(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketGetQuota' });
    const { bucketName, headers, method } = request;
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketGetQuota',
        request,
    };
    const xml = [];
    xml.push(
        '<?xml version="1.0" encoding="UTF-8"?>',
        '<GetBucketQuota>',
        '<Name>', bucketName, '</Name>',
    );

    return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
        const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
        if (err) {
            log.debug('error processing request', {
                error: err,
                method: 'bucketGetQuota',
            });
            return callback(err, null, corsHeaders);
        }
        const bucketQuota = bucket.getQuota();
        if (!bucketQuota) {
            log.debug('bucket has no quota', {
                method: 'bucketGetQuota',
            });
            return callback(errors.NoSuchQuota, null,
                corsHeaders);
        }
        xml.push('<Quota>', bucketQuota, '</Quota>',
            '</GetBucketQuota>');

        pushMetric('getBucketQuota', log, {
            authInfo,
            bucket: bucketName,
        });
        return callback(null, xml.join(''), corsHeaders);
    });
}

module.exports = bucketGetQuota;
@@ -0,0 +1,81 @@
const { waterfall } = require('async');
const { errors } = require('arsenal');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');

/**
 * Bucket Update Quota - Update bucket quota
 * @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
 * @param {object} request - http request object
 * @param {object} log - Werelogs logger
 * @param {function} callback - callback to server
 * @return {undefined}
 */
function bucketUpdateQuota(authInfo, request, log, callback) {
    log.debug('processing request', { method: 'bucketUpdateQuota' });

    const { bucketName } = request;
    const metadataValParams = {
        authInfo,
        bucketName,
        requestType: request.apiMethods || 'bucketUpdateQuota',
        request,
    };
    let bucket = null;
    return waterfall([
        next => standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log,
            (err, b) => {
                bucket = b;
                return next(err, bucket);
            }),
        (bucket, next) => {
            let requestBody;
            try {
                requestBody = JSON.parse(request.post);
            } catch (parseError) {
                return next(errors.InvalidArgument.customizeDescription('Invalid JSON format in request'));
            }
            if (typeof requestBody !== 'object' || Array.isArray(requestBody)) {
                return next(errors.InvalidArgument.customizeDescription('Request body must be a JSON object'));
            }
            return next(null, bucket, requestBody);
        },
        (bucket, requestBody, next) => {
            const quota = parseInt(requestBody.quota, 10);
            if (Number.isNaN(quota)) {
                return next(errors.InvalidArgument.customizeDescription('Quota Value should be a number'));
            }
            if (quota <= 0) {
                return next(errors.InvalidArgument.customizeDescription('Quota value must be a positive number'));
            }
            // Update the bucket quota
            bucket.setQuota(quota);
            return metadata.updateBucket(bucket.getName(), bucket, log, next);
        },
    ], (err, bucket) => {
        const corsHeaders = collectCorsHeaders(request.headers.origin,
            request.method, bucket);
        if (err) {
            log.debug('error processing request', {
                error: err,
                method: 'bucketUpdateQuota'
            });
            monitoring.promMetrics('PUT', bucketName, err.code,
                'updateBucketQuota');
            return callback(err, err.code, corsHeaders);
        } else {
            monitoring.promMetrics(
                'PUT', bucketName, '200', 'updateBucketQuota');
            pushMetric('updateBucketQuota', log, {
                authInfo,
                bucket: bucketName,
            });
        }
        return callback(null, corsHeaders);
    });
}

module.exports = bucketUpdateQuota;
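Together, the three new handlers above expose quota management on the `?quota=true` bucket sub-resource. A minimal sketch of how they are exercised, based on the functional tests and the `sendRequest` signing helper added later in this PR (the endpoint is the local test endpoint used by those tests, and the relative require path is illustrative):

```js
// Sketch based on the functional tests added in this PR; not part of the diff.
const { sendRequest } = require('./tooling'); // signing helper added later in this PR

async function exerciseQuotaApis(bucket) {
    // bucketUpdateQuota: set a 1000-byte quota on the bucket.
    await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify({ quota: 1000 }));
    // bucketGetQuota: resolves to the parsed GetBucketQuota XML document.
    const data = await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
    // bucketDeleteQuota: resets the quota to 0; a later GET returns NoSuchQuota.
    await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
    return data;
}
```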
@@ -11,7 +11,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const metadata = require('../metadata/wrapper');
 const services = require('../services');
 const vault = require('../auth/vault');
-const { isBucketAuthorized, evaluateBucketPolicyWithIAM } =
+const { isBucketAuthorized, evaluateBucketPolicyWithIAM, validateQuotas } =
    require('./apiUtils/authorization/permissionChecks');
 const { preprocessingVersioningDelete }
    = require('./apiUtils/object/versioning');
@@ -332,6 +332,11 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,

                return callback(null, objMD, versionId);
            },
+            // TODO handle inflightsd here
+            (objMD, versionId, callback) => validateQuotas(
+                bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
+                objMD?.['content-length'] || 0, log, err =>
+                    callback(err, objMD, versionId)),
            (objMD, versionId, callback) => {
                const options = preprocessingVersioningDelete(
                    bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
@@ -14,6 +14,7 @@ const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
 const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
 const monitoring = require('../utilities/monitoringHandler');
 const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
+const { validateQuotas } = require('./apiUtils/authorization/permissionChecks');

 const versionIdUtils = versioning.VersionID;

@@ -181,9 +182,17 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
                }
                return next(null, copyLocator.dataLocator, destBucketMD,
                    copyLocator.copyObjectSize, sourceVerId,
-                    sourceLocationConstraintName);
+                    sourceLocationConstraintName, sourceObjMD);
            });
        },
+        function _validateQuotas(dataLocator, destBucketMD,
+            copyObjectSize, sourceVerId,
+            sourceLocationConstraintName, sourceObjMD, next) {
+            return validateQuotas(destBucketMD, request.accountQuotas, valPutParams.requestType, request.apiMethod,
+                sourceObjMD?.['content-length'] || 0, log, err => next(err, dataLocator, destBucketMD,
+                    copyObjectSize, sourceVerId,
+                    sourceLocationConstraintName));
+        },
        // get MPU shadow bucket to get splitter based on MD version
        function getMpuShadowBucket(dataLocator, destBucketMD,
            copyObjectSize, sourceVerId,
@@ -6,7 +6,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const constants = require('../../constants');
 const { data } = require('../data/wrapper');
 const { dataStore } = require('./apiUtils/object/storeObject');
-const { isBucketAuthorized } =
+const { isBucketAuthorized, validateQuotas } =
    require('./apiUtils/authorization/permissionChecks');
 const kms = require('../kms/wrapper');
 const metadata = require('../metadata/wrapper');
@@ -103,6 +103,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
    const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
    const { objectKey } = request;
    const originalIdentityAuthzResults = request.actionImplicitDenies;
+    // For validating the request at the destinationBucket level the
+    // `requestType` is the general 'objectPut'.
+    const requestType = request.apiMethods || 'objectPutPart';

    return async.waterfall([
        // Get the destination bucket.
@@ -122,9 +125,6 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
        }),
        // Check the bucket authorization.
        (destinationBucket, next) => {
-            // For validating the request at the destinationBucket level the
-            // `requestType` is the general 'objectPut'.
-            const requestType = request.apiMethods || 'objectPutPart';
            if (!isBucketAuthorized(destinationBucket, requestType, canonicalID, authInfo,
                log, request, request.actionImplicitDenies)) {
                log.debug('access denied for user on bucket', { requestType });
@@ -132,6 +132,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
            }
            return next(null, destinationBucket);
        },
+        (destinationBucket, next) => validateQuotas(
+            destinationBucket, request.accountQuotas, requestType, request.apiMethod, size, log, err =>
+                next(err, destinationBucket)),
        // Get bucket server-side encryption, if it exists.
        (destinationBucket, next) => getObjectSSEConfiguration(
            request.headers, destinationBucket, log,
@@ -3,9 +3,10 @@ const { errors } = require('arsenal');

 const metadata = require('./wrapper');
 const BucketInfo = require('arsenal').models.BucketInfo;
-const { isBucketAuthorized, isObjAuthorized } =
+const { isBucketAuthorized, isObjAuthorized, validateQuotas } =
    require('../api/apiUtils/authorization/permissionChecks');
 const bucketShield = require('../api/apiUtils/bucket/bucketShield');
+const { onlyOwnerAllowed } = require('../../constants');

 /** getNullVersionFromMaster - retrieves the null version
  * metadata via retrieving the master key
@@ -152,9 +153,6 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) {
        });
        return errors.NoSuchBucket;
    }
-    // if requester is not bucket owner, bucket policy actions should be denied with
-    // MethodNotAllowed error
-    const onlyOwnerAllowed = ['bucketDeletePolicy', 'bucketGetPolicy', 'bucketPutPolicy'];
    const canonicalID = authInfo.getCanonicalID();
    if (!Array.isArray(requestType)) {
        requestType = [requestType];
@@ -229,6 +227,22 @@ function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log,
            }
            return next(null, bucket, objMD);
        },
+        (bucket, objMD, next) => {
+            let contentLength = request?.parsedContentLength || 0;
+            if (!contentLength && objMD?.['content-length']) {
+                // object is being deleted
+                contentLength = -Number.parseInt(objMD['content-length'], 10);
+            } else if (request.apiMethod === 'objectRestore') {
+                // object is being restored
+                contentLength = Number.parseInt(objMD['content-length'], 10);
+            } else if (contentLength && objMD?.['content-length']) {
+                // object is being replaced: store the diff
+                contentLength = Number.parseInt(objMD['content-length'], 10) - contentLength;
+            }
+            // Otherwise, object is either written or will be filtered out when evaluating the quota against actions
+            return validateQuotas(bucket, request.accountQuotas, requestType, request.apiMethod,
+                contentLength, log, err => next(err, bucket, objMD));
+        },
        (bucket, objMD, next) => {
            const canonicalID = authInfo.getCanonicalID();
            if (!isObjAuthorized(bucket, objMD, requestType, canonicalID, authInfo, log, request,
@@ -0,0 +1,60 @@
const { default: ScubaClient } = require('scubaclient');
const { config } = require('../Config');
const { externalBackendHealthCheckInterval } = require('../../constants');

class ScubaClientImpl extends ScubaClient {
    constructor(config) {
        super(config.scuba);
        this.enabled = false;
        this._healthCheckTimer = null;
        this._log = null;

        if (config.scuba) {
            this.enabled = true;
        } else {
            this.enabled = false;
        }
    }

    setup(log) {
        this._log = log;
        if (this.enabled) {
            this.periodicHealthCheck();
        }
    }

    _healthCheck() {
        return this.healthCheck().then(() => {
            if (!this.enabled) {
                this._log.info('Scuba health check passed, enabling quotas');
            }
            this.enabled = true;
        }).catch(err => {
            if (this.enabled) {
                this._log.warn('Scuba health check failed, disabling quotas', {
                    err: err.name,
                    description: err.message,
                });
            }
            this.enabled = false;
        });
    }

    periodicHealthCheck() {
        if (this._healthCheckTimer) {
            clearInterval(this._healthCheckTimer);
        }
        this._healthCheck();
        this._healthCheckTimer = setInterval(async () => {
            this._healthCheck();
        }, Number(process.env.SCUBA_HEALTHCHECK_FREQUENCY)
            || externalBackendHealthCheckInterval);
    }
}

const ScubaClientInstance = new ScubaClientImpl(config);

module.exports = {
    ScubaClientInstance,
    ScubaClientImpl,
};
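For context, the instance exported above is consumed in two places added by this PR; a condensed sketch (the wrapper functions are illustrative, not part of the diff):

```js
// Sketch only: how ScubaClientInstance is used elsewhere in this PR.
const { ScubaClientInstance } = require('./scuba/wrapper');

// At server startup (lib/server.js hunk below): start the periodic health check
// that toggles `enabled` depending on scuba availability.
function startQuotaHealthChecks(log) {
    ScubaClientInstance.setup(log);
}

// During quota evaluation (validateQuotas above): read current utilization,
// passing the inflight byte count of the request being evaluated.
async function readBucketUtilization(bucketName, creationDate, inflight) {
    const metrics = await ScubaClientInstance.getLatestMetrics('bucket',
        `${bucketName}_${creationDate}`, null, { action: 'objectPut', inflight });
    return metrics.bytesTotal; // compared against the configured quota
}
```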
@@ -25,6 +25,7 @@ const {
 } = require('./management/agentClient');

 const HttpAgent = require('agentkeepalive');
+const { ScubaClientInstance } = require('./scuba/wrapper');
 const routes = arsenal.s3routes.routes;
 const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
 const websiteEndpoints = _config.websiteEndpoints;
@@ -321,6 +322,9 @@ class S3Server {
            this._startServer(this.routeAdminRequest, _config.metricsPort);
        }

+        // Start ScubaClient health checks
+        ScubaClientInstance.setup(log);
+
        // TODO this should wait for metadata healthcheck to be ok
        // TODO only do this in cluster master
        if (enableRemoteManagement) {
@@ -21,7 +21,7 @@
  "dependencies": {
    "@azure/storage-blob": "^12.12.0",
    "@hapi/joi": "^17.1.0",
-    "arsenal": "git+https://github.com/scality/arsenal#8.1.127",
+    "arsenal": "git+https://github.com/scality/arsenal#77e9b92f3e775e39f5f903a00b702a86b2aa75a1",
    "async": "~2.5.0",
    "aws-sdk": "2.905.0",
    "bucketclient": "scality/bucketclient#8.1.9",
@@ -41,6 +41,7 @@
    "npm-run-all": "~4.1.5",
    "prom-client": "14.2.0",
    "request": "^2.81.0",
+    "scubaclient": "git+https://github.com/scality/scubaclient.git",
    "sql-where-parser": "~2.2.1",
    "utapi": "github:scality/utapi#8.1.13",
    "utf-8-validate": "^5.0.8",
@@ -113,6 +114,7 @@
    "test_versionid_base62": "S3_VERSION_ID_ENCODING_TYPE=base62 CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit/api",
    "test_legacy_location": "CI=true S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
    "test_utapi_v2": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/utapi",
+    "test_scuba": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/scuba",
    "multiple_backend_test": "CI=true S3BACKEND=mem S3DATA=multiple mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 20000 --recursive tests/multipleBackend",
    "unit_coverage": "CI=true mkdir -p coverage/unit/ && S3BACKEND=mem istanbul cover --dir coverage/unit _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
    "unit_coverage_legacy_location": "CI=true mkdir -p coverage/unitlegacylocation/ && S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem istanbul cover --dir coverage/unitlegacylocation _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --reporter mocha-junit-reporter --recursive tests/unit"
@@ -0,0 +1,39 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;
const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;

const bucket = 'deletequotatestbucket';
const nonExistantBucket = 'deletequotatestnonexistantbucket';

describe('Test delete bucket quota', () => {
    let s3;

    before(() => {
        const config = getConfig('default', { signatureVersion: 'v4' });
        s3 = new S3(config);
        AWS.config.update(config);
    });

    beforeEach(done => s3.createBucket({ Bucket: bucket }, done));

    afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));

    it('should delete the bucket quota', async () => {
        try {
            await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
            assert.ok(true);
        } catch (err) {
            assert.fail(`Expected no error, but got ${err}`);
        }
    });

    it('should return no such bucket error', async () => {
        try {
            await sendRequest('DELETE', '127.0.0.1:8000', `/${nonExistantBucket}/?quota=true`);
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
        }
    });
});
@@ -0,0 +1,77 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;
const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;

const bucket = 'getquotatestbucket';
const quota = { quota: 1000 };

describe('Test get bucket quota', () => {
    let s3;

    before(() => {
        const config = getConfig('default', { signatureVersion: 'v4' });
        s3 = new S3(config);
        AWS.config.update(config);
    });

    beforeEach(done => s3.createBucket({ Bucket: bucket }, done));

    afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));

    it('should return the quota', async () => {
        try {
            await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota));
            const data = await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
            assert.strictEqual(data.GetBucketQuota.Name[0], bucket);
            assert.strictEqual(data.GetBucketQuota.Quota[0], '1000');
        } catch (err) {
            assert.fail(`Expected no error, but got ${err}`);
        }
    });

    it('should return no such bucket error', async () => {
        try {
            await sendRequest('GET', '127.0.0.1:8000', '/test/?quota=true');
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
        }
    });

    it('should return no such bucket quota', async () => {
        try {
            await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
            try {
                await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
                assert.fail('Expected NoSuchQuota error');
            } catch (err) {
                assert.strictEqual(err.Error.Code[0], 'NoSuchQuota');
            }
        } catch (err) {
            assert.fail(`Expected no error, but got ${err}`);
        }
    });

    it('should return no such bucket error', async () => {
        try {
            await sendRequest('GET', '127.0.0.1:8000', '/test/?quota=true');
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
        }
    });

    it('should return no such bucket quota', async () => {
        try {
            await sendRequest('DELETE', '127.0.0.1:8000', `/${bucket}/?quota=true`);
            try {
                await sendRequest('GET', '127.0.0.1:8000', `/${bucket}/?quota=true`);
                assert.fail('Expected NoSuchQuota error');
            } catch (err) {
                assert.strictEqual(err.Error.Code[0], 'NoSuchQuota');
            }
        } catch (err) {
            assert.fail(`Expected no error, but got ${err}`);
        }
    });
});
@@ -0,0 +1,61 @@
const AWS = require('aws-sdk');
const S3 = AWS.S3;

const assert = require('assert');
const getConfig = require('../support/config');
const sendRequest = require('../quota/tooling').sendRequest;

const bucket = 'updatequotatestbucket';
const nonExistantBucket = 'updatequotatestnonexistantbucket';
const quota = { quota: 2000 };
const negativeQuota = { quota: -1000 };
const wrongquotaFromat = '1000';

describe('Test update bucket quota', () => {
    let s3;

    before(() => {
        const config = getConfig('default', { signatureVersion: 'v4' });
        s3 = new S3(config);
        AWS.config.update(config);
    });

    beforeEach(done => s3.createBucket({ Bucket: bucket }, done));

    afterEach(done => s3.deleteBucket({ Bucket: bucket }, done));

    it('should update the quota', async () => {
        try {
            await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(quota));
            assert.ok(true);
        } catch (err) {
            assert.fail(`Expected no error, but got ${err}`);
        }
    });

    it('should return no such bucket error', async () => {
        try {
            await sendRequest('PUT', '127.0.0.1:8000', `/${nonExistantBucket}/?quota=true`, JSON.stringify(quota));
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'NoSuchBucket');
        }
    });

    it('should return error when quota is negative', async () => {
        try {
            await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(negativeQuota));
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'InvalidArgument');
            assert.strictEqual(err.Error.Message[0], 'Quota value must be a positive number');
        }
    });

    it('should return error when quota is not in correct format', async () => {
        try {
            await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, wrongquotaFromat);
        } catch (err) {
            assert.strictEqual(err.Error.Code[0], 'InvalidArgument');
            assert.strictEqual(err.Error.Message[0], 'Request body must be a JSON object');
        }
    });
});
@@ -33,7 +33,7 @@ describe('aws-node-sdk v2auth query tests', function testSuite() {
    let s3;

    before(() => {
-        const config = getConfig('default');
+        const config = getConfig('default', { signatureVersion: 'v2' });

        s3 = new S3(config);
    });
@@ -0,0 +1,52 @@
const fetch = require('node-fetch');
const AWS = require('aws-sdk');
const xml2js = require('xml2js');

const sendRequest = async (method, host, path, body = '', config = null) =>
    new Promise(async (resolve, reject) => {
        const service = 's3';
        const endpoint = new AWS.Endpoint(host);

        const request = new AWS.HttpRequest(endpoint);
        request.method = method.toUpperCase();
        request.path = path;
        request.body = body;
        request.headers.Host = host;
        request.headers['X-Amz-Date'] = new Date().toISOString().replace(/[:\-]|\.\d{3}/g, '');
        const sha256hash = AWS.util.crypto.sha256(request.body || '', 'hex');
        request.headers['X-Amz-Content-SHA256'] = sha256hash;
        request.region = 'us-east-1';

        const signer = new AWS.Signers.V4(request, service);
        const accessKeyId = config?.accessKey || AWS.config.credentials?.accessKeyId;
        const secretAccessKey = config?.secretKey || AWS.config.credentials?.secretAccessKey;
        const credentials = new AWS.Credentials(accessKeyId, secretAccessKey);
        signer.addAuthorization(credentials, new Date());

        const url = `http://${host}${path}`;
        const options = {
            method: request.method,
            headers: request.headers,
        };

        if (method !== 'GET') {
            options.body = request.body;
        }

        try {
            const response = await fetch(url, options);
            const text = await response.text();
            const result = await xml2js.parseStringPromise(text);
            if (result && result.Error) {
                reject(result);
            } else {
                resolve(result);
            }
        } catch (error) {
            reject(error);
        }
    });

module.exports = {
    sendRequest,
};
@@ -0,0 +1,479 @@
const async = require('async');
const assert = require('assert');
const { S3 } = require('aws-sdk');
const getConfig = require('../functional/aws-node-sdk/test/support/config');
const { Scuba: MockScuba, inflightFlushFrequencyMS } = require('../utilities/mock/Scuba');
const sendRequest = require('../functional/aws-node-sdk/test/quota/tooling').sendRequest;
const memCredentials = require('../functional/aws-node-sdk/lib/json/mem_credentials.json');

let s3Client = null;
const quota = { quota: 1000 };

function wait(timeoutMs, cb) {
    setTimeout(cb, timeoutMs);
}

function createBucket(bucket, cb) {
    return s3Client.createBucket({
        Bucket: bucket,
    }, (err, data) => {
        assert.ifError(err);
        return cb(err, data);
    });
}

function deleteBucket(bucket, cb) {
    return s3Client.deleteBucket({
        Bucket: bucket,
    }, err => {
        assert.ifError(err);
        return cb(err);
    });
}

function putObject(bucket, key, size, cb) {
    return s3Client.putObject({
        Bucket: bucket,
        Key: key,
        Body: Buffer.alloc(size),
    }, cb);
}

function copyObject(bucket, key, cb) {
    return s3Client.copyObject({
        Bucket: bucket,
        CopySource: `/${bucket}/${key}`,
        Key: `${key}-copy`,
    }, cb);
}

function deleteObject(bucket, key, cb) {
    return s3Client.deleteObject({
        Bucket: bucket,
        Key: key,
    }, err => {
        assert.ifError(err);
        return cb(err);
    });
}

function objectMPU(bucket, key, parts, partSize, callback) {
    let ETags = [];
    let uploadId = null;
    const partNumbers = Array.from(Array(parts).keys());
    const initiateMPUParams = {
        Bucket: bucket,
        Key: key,
    };
    return async.waterfall([
        next => s3Client.createMultipartUpload(initiateMPUParams,
            (err, data) => {
                if (err) {
                    return next(err);
                }
                uploadId = data.UploadId;
                return next();
            }),
        next =>
            async.mapLimit(partNumbers, 1, (partNumber, callback) => {
                const uploadPartParams = {
                    Bucket: bucket,
                    Key: key,
                    PartNumber: partNumber + 1,
                    UploadId: uploadId,
                    Body: Buffer.alloc(partSize),
                };

                return s3Client.uploadPart(uploadPartParams,
                    (err, data) => {
                        if (err) {
                            return callback(err);
                        }
                        return callback(null, data.ETag);
                    });
            }, (err, results) => {
                if (err) {
                    return next(err);
                }
                ETags = results;
                return next();
            }),
        next => {
            const params = {
                Bucket: bucket,
                Key: key,
                MultipartUpload: {
                    Parts: partNumbers.map(n => ({
                        ETag: ETags[n],
                        PartNumber: n + 1,
                    })),
                },
                UploadId: uploadId,
            };
            return s3Client.completeMultipartUpload(params, next);
        },
    ], err => callback(err, uploadId));
}

function abortMPU(bucket, key, uploadId, callback) {
    return s3Client.abortMultipartUpload({
        Bucket: bucket,
        Key: key,
        UploadId: uploadId,
    }, callback);
}

function uploadPartCopy(bucket, key, partNumber, partSize, sleepDuration, keyToCopy, callback) {
    const ETags = [];
    let uploadId = null;
    const parts = 5;
    const partNumbers = Array.from(Array(parts).keys());
    const initiateMPUParams = {
        Bucket: bucket,
        Key: key,
    };
    return async.waterfall([
        next => s3Client.createMultipartUpload(initiateMPUParams,
            (err, data) => {
                if (err) {
                    return next(err);
                }
                uploadId = data.UploadId;
                return next();
            }),
        next => {
            const uploadPartParams = {
                Bucket: bucket,
                Key: key,
                PartNumber: partNumber + 1,
                UploadId: uploadId,
                Body: Buffer.alloc(partSize),
            };
            return s3Client.uploadPart(uploadPartParams, (err, data) => {
                if (err) {
                    return next(err);
                }
                ETags[partNumber] = data.ETag;
                return next();
            });
        },
        next => wait(sleepDuration, next),
        next => {
            const copyPartParams = {
                Bucket: bucket,
                CopySource: `/${bucket}/${keyToCopy}`,
                Key: `${key}-copy`,
                PartNumber: partNumber + 1,
                UploadId: uploadId,
            };
            return s3Client.uploadPartCopy(copyPartParams, (err, data) => {
                if (err) {
                    return next(err);
                }
                ETags[partNumber] = data.ETag;
                return next(null, data.ETag);
            });
        },
        next => {
            const params = {
                Bucket: bucket,
                Key: key,
                MultipartUpload: {
                    Parts: partNumbers.map(n => ({
                        ETag: ETags[n],
                        PartNumber: n + 1,
                    })),
                },
                UploadId: uploadId,
            };
            return s3Client.completeMultipartUpload(params, next);
        },
    ], err => callback(err, uploadId));
}

function restoreObject(bucket, key, callback) {
    return s3Client.restoreObject({
        Bucket: bucket,
        Key: key,
        RestoreRequest: {
            Days: 1,
        },
    }, callback);
}

function multiObjectDelete(bucket, keys, callback) {
    return s3Client.deleteObjects({
        Bucket: bucket,
        Delete: {
            Objects: keys.map(key => ({ Key: key })),
        },
    }, callback);
}

describe('quota evaluation with scuba metrics', function t() {
    this.timeout(30000);
    const scuba = new MockScuba();
    const putQuotaVerb = 'PUT';
    const config = {
        accessKey: memCredentials.default.accessKey,
        secretKey: memCredentials.default.secretKey,
    };

    before(done => {
        const config = getConfig('default', { signatureVersion: 'v4' });
        s3Client = new S3(config);
        scuba.start();
        return wait(2000, done);
    });

    afterEach(() => {
        scuba.reset();
    });

    after(() => {
        scuba.stop();
    });

    it('should return QuotaExceeded when trying to PutObject in a bucket with quota', done => {
        const bucket = 'quota-test-bucket1';
        const key = 'quota-test-object';
        const size = 1024;
        return async.series([
            next => createBucket(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, err => {
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should return QuotaExceeded when trying to CopyObject in a bucket with quota', done => {
        const bucket = 'quota-test-bucket2';
        const key = 'quota-test-object';
        const size = 900;
        return async.series([
            next => createBucket(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, next),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => copyObject(bucket, key, err => {
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            next => deleteObject(bucket, key, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should return QuotaExceeded when trying to complete MPU in a bucket with quota', done => {
        const bucket = 'quota-test-bucket3';
        const key = 'quota-test-object';
        const parts = 5;
        const partSize = 1024 * 1024 * 6;
        let uploadId = null;
        return async.series([
            next => createBucket(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => objectMPU(bucket, key, parts, partSize, (err, _uploadId) => {
                uploadId = _uploadId;
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            next => abortMPU(bucket, key, uploadId, next),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), 0);
                return next();
            },
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should not return QuotaExceeded if the quota is not exceeded', done => {
        const bucket = 'quota-test-bucket4';
        const key = 'quota-test-object';
        const size = 300;
        return async.series([
            next => createBucket(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => deleteObject(bucket, key, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should not evaluate quotas if the backend is not available', done => {
        scuba.stop();
        const bucket = 'quota-test-bucket5';
        const key = 'quota-test-object';
        const size = 1024;
        return async.series([
            next => createBucket(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => deleteObject(bucket, key, next),
            next => deleteBucket(bucket, next),
        ], err => {
            assert.ifError(err);
            scuba.start();
            return wait(2000, done);
        });
    });

    it('should return QuotaExceeded when trying to copy a part in a bucket with quota', done => {
        const bucket = 'quota-test-bucket6';
        const key = 'quota-test-object-copy';
        const keyToCopy = 'quota-test-existing';
        const parts = 5;
        const partSize = 1024 * 1024 * 6;
        let uploadId = null;
        return async.series([
            next => createBucket(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify({ quota: Math.round(partSize * 2.5) }), config)
                .then(() => next()).catch(err => next(err)),
            next => putObject(bucket, keyToCopy, partSize, next),
            next => uploadPartCopy(bucket, key, parts, partSize, inflightFlushFrequencyMS * 2, keyToCopy,
                (err, _uploadId) => {
                    uploadId = _uploadId;
                    assert.strictEqual(err.code, 'QuotaExceeded');
                    return next();
                }),
            next => abortMPU(bucket, key, uploadId, next),
            next => deleteObject(bucket, keyToCopy, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should return QuotaExceeded when trying to restore an object in a bucket with quota', done => {
        const bucket = 'quota-test-bucket7';
        const key = 'quota-test-object';
        const size = 900;
        return async.series([
            next => createBucket(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => restoreObject(bucket, key, err => {
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            next => deleteObject(bucket, key, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should allow writes after deleting data with quotas', done => {
        const bucket = 'quota-test-bucket8';
        const key = 'quota-test-object';
        const size = 400;
        return async.series([
            next => createBucket(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, `${key}1`, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => putObject(bucket, `${key}2`, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => putObject(bucket, `${key}3`, size, err => {
                assert.strictEqual(err.code, 'QuotaExceeded');
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size * 2);
                return next();
            },
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => deleteObject(bucket, `${key}2`, next),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => putObject(bucket, `${key}4`, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => deleteObject(bucket, `${key}1`, next),
            next => deleteObject(bucket, `${key}3`, next),
            next => deleteObject(bucket, `${key}4`, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should not increase the inflights when the object is being rewritten with a smaller object', done => {
        const bucket = 'quota-test-bucket9';
        const key = 'quota-test-object';
        const size = 400;
        return async.series([
            next => createBucket(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, key, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => putObject(bucket, key, size - 100, err => {
                assert.ifError(err);
                return next();
            }),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
                return next();
            },
            next => deleteObject(bucket, key, next),
            next => deleteBucket(bucket, next),
        ], done);
    });

    it('should decrease the inflights when performing multi object delete', done => {
        const bucket = 'quota-test-bucket10';
        const key = 'quota-test-object';
        const size = 400;
        return async.series([
            next => createBucket(bucket, next),
            next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
                JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
            next => putObject(bucket, `${key}1`, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => putObject(bucket, `${key}2`, size, err => {
                assert.ifError(err);
                return next();
            }),
            next => wait(inflightFlushFrequencyMS * 2, next),
            next => multiObjectDelete(bucket, [`${key}1`, `${key}2`], err => {
                assert.ifError(err);
                return next();
            }),
            next => {
                assert.strictEqual(scuba.getInflightsForBucket(bucket), 0);
                return next();
            },
            next => deleteBucket(bucket, next),
        ], done);
    });
});
@@ -350,6 +350,49 @@ describe('Config', () => {
        });
    });

    describe('scuba option setup', () => {
        let oldConfig;

        before(() => {
            oldConfig = process.env.S3_CONFIG_FILE;
            process.env.S3_CONFIG_FILE =
                'tests/unit/testConfigs/allOptsConfig/config.json';
        });

        after(() => {
            process.env.S3_CONFIG_FILE = oldConfig;
        });

        it('should set up scuba', () => {
            const { ConfigObject } = require('../../lib/Config');
            const config = new ConfigObject();

            assert.deepStrictEqual(
                config.scuba,
                {
                    host: 'localhost',
                    port: 8100,
                },
            );
        });

        it('should use environment variables for scuba', () => {
            setEnv('SCUBA_HOST', 'scubahost');
            setEnv('SCUBA_PORT', 1234);

            const { ConfigObject } = require('../../lib/Config');
            const config = new ConfigObject();

            assert.deepStrictEqual(
                config.scuba,
                {
                    host: 'scubahost',
                    port: 1234,
                },
            );
        });
    });

    describe('utapi option setup', () => {
        let oldConfig;
@@ -39,7 +39,7 @@ describe('getObjMetadataAndDelete function for multiObjectDelete', () => {
        headers: {},
        parsedContentLength: contentLength,
    }, postBody);
-   const bucket = { getVersioningConfiguration: () => null };
+   const bucket = { getVersioningConfiguration: () => null, getQuota: () => 0 };

    beforeEach(done => {
        cleanup();
@@ -1,13 +1,33 @@
const sinon = require('sinon');
const assert = require('assert');
const {
    checkBucketAcls,
    checkObjectAcls,
    validatePolicyConditions,
    validateQuotas,
} = require('../../../lib/api/apiUtils/authorization/permissionChecks');
const constants = require('../../../constants');
const { ScubaClientInstance } = require('../../../lib/scuba/wrapper');

const { bucketOwnerActions, logId } = constants;

const mockBucket = {
    getQuota: () => 100,
    getName: () => 'bucketName',
    getCreationDate: () => '2022-01-01T00:00:00.000Z',
};

const mockBucketNoQuota = {
    getQuota: () => 100,
    getName: () => 'bucketName',
    getCreationDate: () => '2022-01-01T00:00:00.000Z',
};

const mockLog = {
    warn: sinon.stub(),
    debug: sinon.stub(),
};

describe('checkBucketAcls', () => {
    const mockBucket = {
        getOwner: () => 'ownerId',

@@ -534,3 +554,307 @@ describe('validatePolicyConditions', () => {
        });
    });
});

describe('validateQuotas (buckets)', () => {
    beforeEach(() => {
        ScubaClientInstance.enabled = true;
        ScubaClientInstance.getLatestMetrics = sinon.stub().resolves({});
    });

    afterEach(() => {
        sinon.restore();
    });

    it('should return null if quota is <= 0 or scuba is disabled', done => {
        validateQuotas(mockBucketNoQuota, {}, [], '', false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.called, false);
            done();
        });
    });

    it('should return null if scuba is disabled', done => {
        ScubaClientInstance.enabled = false;
        validateQuotas(mockBucket, {}, [], '', false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.called, false);
            done();
        });
    });

    it('should return null if metrics retrieval fails', done => {
        ScubaClientInstance.enabled = true;
        const error = new Error('Failed to get metrics');
        ScubaClientInstance.getLatestMetrics.rejects(error);

        validateQuotas(mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledOnce, true);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
                'bucket',
                'bucketName_1640995200000',
                null,
                {
                    action: 'objectPut',
                    inflight: 1,
                }
            ), true);
            done();
        });
    });

    it('should return errors.QuotaExceeded if quota is exceeded', done => {
        const result1 = {
            bytesTotal: 150,
        };
        const result2 = {
            bytesTotal: 120,
        };
        ScubaClientInstance.getLatestMetrics.resolves(result1);
        ScubaClientInstance.getLatestMetrics.resolves(result2);

        validateQuotas(mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
            assert.strictEqual(err.is.QuotaExceeded, true);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledTwice, true);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
                'bucket',
                'bucketName_1640995200000',
                null,
                {
                    action: 'objectPut',
                    inflight: 1,
                }
            ), true);
            done();
        });
    });

    it('should not return QuotaExceeded if the quotas are exceeded but operation is a delete', done => {
        const result1 = {
            bytesTotal: 150,
        };
        const result2 = {
            bytesTotal: 120,
        };
        ScubaClientInstance.getLatestMetrics.resolves(result1);
        ScubaClientInstance.getLatestMetrics.resolves(result2);

        validateQuotas(mockBucket, {}, ['objectDelete'], 'objectDelete', -50, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledOnce, true);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
                'bucket',
                'bucketName_1640995200000',
                null,
                {
                    action: 'objectDelete',
                    inflight: -50,
                }
            ), true);
            done();
        });
    });

    it('should return null if quota is not exceeded', done => {
        const result1 = {
            bytesTotal: 80,
        };
        const result2 = {
            bytesTotal: 90,
        };
        ScubaClientInstance.getLatestMetrics.resolves(result1);
        ScubaClientInstance.getLatestMetrics.resolves(result2);

        validateQuotas(mockBucket, {}, ['objectRestore', 'objectPut'], 'objectRestore',
            true, mockLog, err => {
                assert.ifError(err);
                assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledTwice, true);
                assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
                    'bucket',
                    'bucketName_1640995200000',
                    null,
                    {
                        action: 'objectRestore',
                        inflight: true,
                    }
                ), true);
                done();
            });
    });
});

describe('validateQuotas (with accounts)', () => {
    beforeEach(() => {
        ScubaClientInstance.enabled = true;
        ScubaClientInstance.getLatestMetrics = sinon.stub().resolves({});
    });

    afterEach(() => {
        sinon.restore();
    });

    it('should return null if quota is <= 0 or scuba is disabled', done => {
        validateQuotas(mockBucketNoQuota, {
            account: 'test_1',
            quota: 0,
        }, [], '', false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.called, false);
            done();
        });
    });

    it('should not return null if bucket quota is <= 0 but account quota is > 0', done => {
        validateQuotas(mockBucketNoQuota, {
            account: 'test_1',
            quota: 1000,
        }, [], '', false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.called, false);
            done();
        });
    });

    it('should return null if scuba is disabled', done => {
        ScubaClientInstance.enabled = false;
        validateQuotas(mockBucket, {
            account: 'test_1',
            quota: 1000,
        }, [], '', false, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.called, false);
            done();
        });
    });

    it('should return null if metrics retrieval fails', done => {
        ScubaClientInstance.enabled = true;
        const error = new Error('Failed to get metrics');
        ScubaClientInstance.getLatestMetrics.rejects(error);

        validateQuotas(mockBucket, {
            account: 'test_1',
            quota: 1000,
        }, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledOnce, true);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
                'bucket',
                'bucketName_1640995200000',
                null,
                {
                    action: 'objectPut',
                    inflight: 1,
                }
            ), true);
            done();
        });
    });

    it('should return errors.QuotaExceeded if quota is exceeded', done => {
        const result1 = {
            bytesTotal: 150,
        };
        const result2 = {
            bytesTotal: 120,
        };
        ScubaClientInstance.getLatestMetrics.resolves(result1);
        ScubaClientInstance.getLatestMetrics.resolves(result2);

        validateQuotas(mockBucket, {
            account: 'test_1',
            quota: 100,
        }, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
            assert.strictEqual(err.is.QuotaExceeded, true);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.callCount, 4);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
                'bucket',
                'bucketName_1640995200000',
                null,
                {
                    action: 'objectPut',
                    inflight: 1,
                }
            ), true);
            done();
        });
    });

    it('should not return QuotaExceeded if the quotas are exceeded but operation is a delete', done => {
        const result1 = {
            bytesTotal: 150,
        };
        const result2 = {
            bytesTotal: 120,
        };
        ScubaClientInstance.getLatestMetrics.resolves(result1);
        ScubaClientInstance.getLatestMetrics.resolves(result2);

        validateQuotas(mockBucket, {
            account: 'test_1',
            quota: 1000,
        }, ['objectDelete'], 'objectDelete', -50, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledTwice, true);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
                'bucket',
                'bucketName_1640995200000',
                null,
                {
                    action: 'objectDelete',
                    inflight: -50,
                }
            ), true);
            done();
        });
    });

    it('should return null if quota is not exceeded', done => {
        const result1 = {
            bytesTotal: 80,
        };
        const result2 = {
            bytesTotal: 90,
        };
        ScubaClientInstance.getLatestMetrics.resolves(result1);
        ScubaClientInstance.getLatestMetrics.resolves(result2);

        validateQuotas(mockBucket, {
            account: 'test_1',
            quota: 1000,
        }, ['objectRestore', 'objectPut'], 'objectRestore', true, mockLog, err => {
            assert.ifError(err);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.callCount, 4);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
                'bucket',
                'bucketName_1640995200000',
                null,
                {
                    action: 'objectRestore',
                    inflight: true,
                }
            ), true);
            done();
        });
    });

    it('should return quota exceeded if account and bucket quotas are different', done => {
        const result1 = {
            bytesTotal: 150,
        };
        const result2 = {
            bytesTotal: 120,
        };
        ScubaClientInstance.getLatestMetrics.resolves(result1);
        ScubaClientInstance.getLatestMetrics.resolves(result2);

        validateQuotas(mockBucket, {
            account: 'test_1',
            quota: 1000,
        }, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
            assert.strictEqual(err.is.QuotaExceeded, true);
            assert.strictEqual(ScubaClientInstance.getLatestMetrics.callCount, 4);
            done();
        });
    });
});
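As exercised by the tests above, validateQuotas appears to take the bucket, the account quota information, the list of action names, the acting API method, the inflight delta, a logger, and a callback. A minimal sketch of a call under those assumptions (the bucket and log objects mirror the mocks above):

    // Signature inferred from the unit tests above; not authoritative.
    const bucket = { getQuota: () => 100, getName: () => 'bucketName', getCreationDate: () => '2022-01-01T00:00:00.000Z' };
    const log = { warn: () => {}, debug: () => {} };
    validateQuotas(bucket, { account: 'test_1', quota: 1000 },
        ['objectPut', 'getObject'], // actions being authorized
        'objectPut',                // acting API method
        1024,                       // inflight bytes added by this request
        log, err => {
            // err.is.QuotaExceeded is true when the latest scuba metrics exceed either quota
        });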
@@ -0,0 +1,86 @@
const assert = require('assert');
const sinon = require('sinon');
const { ScubaClientImpl } = require('../../../lib/scuba/wrapper');

describe('ScubaClientImpl', () => {
    let client;
    let log;

    beforeEach(() => {
        client = new ScubaClientImpl({ scuba: true });
        log = {
            info: sinon.spy(),
            warn: sinon.spy(),
        };
        client.setup(log);
    });

    afterEach(() => {
        sinon.restore();
    });

    describe('setup', () => {
        it('should enable Scuba and start periodic health check', () => {
            client.setup(log);

            assert.strictEqual(client.enabled, true);
        });

        it('should not enable Scuba if config.scuba is falsy', () => {
            client = new ScubaClientImpl({ scuba: false });
            client.setup(log);

            assert.strictEqual(client.enabled, false);
        });
    });

    describe('_healthCheck', () => {
        it('should enable Scuba if health check passes', async () => {
            sinon.stub(client, 'healthCheck').resolves();

            await client._healthCheck();

            assert.strictEqual(client.enabled, true);
        });

        it('should disable Scuba if health check fails', async () => {
            const error = new Error('Health check failed');
            sinon.stub(client, 'healthCheck').rejects(error);

            await client._healthCheck();

            assert.strictEqual(client.enabled, false);
        });
    });

    describe('periodicHealthCheck', () => {
        let healthCheckStub;
        let setIntervalStub;
        let clearIntervalStub;

        beforeEach(() => {
            healthCheckStub = sinon.stub(client, '_healthCheck');
            setIntervalStub = sinon.stub(global, 'setInterval');
            clearIntervalStub = sinon.stub(global, 'clearInterval');
        });

        it('should call _healthCheck and start periodic health check', () => {
            client._healthCheckTimer = null;
            client.periodicHealthCheck();

            assert(healthCheckStub.calledOnce);
            assert(setIntervalStub.calledOnce);
            assert(clearIntervalStub.notCalled);
        });

        it('should clear previous health check timer before starting a new one', () => {
            client._healthCheckTimer = 123;

            client.periodicHealthCheck();

            assert(healthCheckStub.calledOnce);
            assert(setIntervalStub.calledOnce);
            assert(clearIntervalStub.calledOnceWith(123));
        });
    });
});
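The wrapper lifecycle these tests exercise can be summarized in a short sketch; only the names used in the tests are assumed to exist, the surrounding commentary is inferred and hedged:

    // Sketch of the lifecycle exercised above.
    const { ScubaClientImpl } = require('../../../lib/scuba/wrapper');

    const log = { info: () => {}, warn: () => {} };
    const client = new ScubaClientImpl({ scuba: true });
    client.setup(log);            // enables the client when config.scuba is truthy
    client.periodicHealthCheck(); // re-runs _healthCheck() on an interval via setInterval
    // A failing health check flips client.enabled to false, so quota evaluation is skipped
    // until a later health check succeeds again.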
@@ -100,6 +100,10 @@
        "name": "zenko",
        "sentinels": "localhost:6379"
    },
    "scuba": {
        "host": "localhost",
        "port": 8100
    },
    "utapi": {
        "redis": {
            "host": "localhost",
@@ -0,0 +1,104 @@
const { errors } = require('arsenal');
const express = require('express');

const inflightFlushFrequencyMS = 200;

class Scuba {
    constructor() {
        this._server = null;
        this._port = 8100;
        this._data = {
            bucket: new Map(),
        };
        this._app = express();
    }

    _initiateRoutes() {
        this._app.use(express.json());

        this._app.get('/health/deep', (req, res) => {
            const headerValue = req.header('error');
            if (headerValue) {
                return res.status(500).send(errors.InternalError);
            }
            return res.status(204).end();
        });

        this._app.post('/metrics/bucket/:bucket/latest', (req, res) => {
            const bucketName = req.params.bucket;
            const inflight = Number(req.body?.inflight) || 0;
            this._updateData({
                action: req.body?.action,
                bucket: bucketName,
                inflight,
            });
            const immediateInflights = req.body?.action === 'objectRestore' ? 0 : inflight;
            res.json({
                bytesTotal: (this._data.bucket.get(bucketName)?.current || 0) +
                    (this._data.bucket.get(bucketName)?.nonCurrent || 0) +
                    (this._data.bucket.get(bucketName)?.inflight || 0) +
                    immediateInflights,
            });
        });
    }

    _updateData(event) {
        const { action, inflight, bucket } = event;
        let timeout = inflightFlushFrequencyMS;
        if (action === 'objectRestore') {
            timeout = 0;
        }
        if (!this._data.bucket.get(bucket)) {
            this._data.bucket.set(bucket, { current: 0, nonCurrent: 0, inflight: 0 });
        }
        if (timeout) {
            setTimeout(() => {
                if (this._data.bucket.get(bucket)) {
                    this._data.bucket.set(bucket, {
                        current: this._data.bucket.get(bucket).current,
                        nonCurrent: this._data.bucket.get(bucket).nonCurrent,
                        inflight: this._data.bucket.get(bucket).inflight + inflight,
                    });
                }
            }, timeout);
        } else {
            if (this._data.bucket.get(bucket)) {
                this._data.bucket.set(bucket, {
                    current: this._data.bucket.get(bucket).current,
                    nonCurrent: this._data.bucket.get(bucket).nonCurrent,
                    inflight: this._data.bucket.get(bucket).inflight + inflight,
                });
            }
        }
    }

    start() {
        this._initiateRoutes();
        this._server = this._app.listen(this._port);
    }

    reset() {
        this._data = {
            bucket: new Map(),
        };
    }

    stop() {
        this._server.close();
    }

    getInflightsForBucket(bucketName) {
        let inflightCount = 0;
        this._data.bucket.forEach((value, key) => {
            if (key.startsWith(`${bucketName}_`)) {
                inflightCount += value.inflight;
            }
        });
        return inflightCount;
    }
}

module.exports = {
    Scuba,
    inflightFlushFrequencyMS,
};
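The functional quota tests drive this mock through CloudServer, but it can also be exercised directly; a minimal sketch, assuming the module path used by those tests:

    // Sketch only: exercises the in-memory mock directly.
    const { Scuba, inflightFlushFrequencyMS } = require('../utilities/mock/Scuba');

    const scuba = new Scuba();
    scuba.start(); // Express server on port 8100 exposing /health/deep and /metrics/bucket/:bucket/latest

    // CloudServer POSTs { action, inflight } to /metrics/bucket/<name>_<creationDate>/latest;
    // after inflightFlushFrequencyMS the mock folds those inflight bytes into its per-bucket counters.
    setTimeout(() => {
        console.log(scuba.getInflightsForBucket('quota-test-bucket1'));
        scuba.stop();
    }, inflightFlushFrequencyMS * 2);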
@@ -1,6 +1,7 @@
/* eslint-disable global-require */
const index = {
    Utapi: require('./Utapi'),
    Scuba: require('./Scuba'),
};

module.exports = index;
yarn.lock: 170 changed lines

@@ -2,6 +2,57 @@
# yarn lockfile v1


"@aws-crypto/crc32@3.0.0":
  version "3.0.0"
  resolved "https://registry.yarnpkg.com/@aws-crypto/crc32/-/crc32-3.0.0.tgz#07300eca214409c33e3ff769cd5697b57fdd38fa"
  integrity sha512-IzSgsrxUcsrejQbPVilIKy16kAT52EwB6zSaI+M3xxIhKh5+aldEyvI+z6erM7TCLB2BJsFrtHjp6/4/sr+3dA==
  dependencies:
    "@aws-crypto/util" "^3.0.0"
    "@aws-sdk/types" "^3.222.0"
    tslib "^1.11.1"

"@aws-crypto/sha256-js@^5.2.0":
  version "5.2.0"
  resolved "https://registry.yarnpkg.com/@aws-crypto/sha256-js/-/sha256-js-5.2.0.tgz#c4fdb773fdbed9a664fc1a95724e206cf3860042"
  integrity sha512-FFQQyu7edu4ufvIZ+OadFpHHOt+eSTBaYaki44c+akjg7qZg9oOQeLlk77F6tSYqjDAFClrHJk9tMf0HdVyOvA==
  dependencies:
    "@aws-crypto/util" "^5.2.0"
    "@aws-sdk/types" "^3.222.0"
    tslib "^2.6.2"

"@aws-crypto/util@^3.0.0":
  version "3.0.0"
  resolved "https://registry.yarnpkg.com/@aws-crypto/util/-/util-3.0.0.tgz#1c7ca90c29293f0883468ad48117937f0fe5bfb0"
  integrity sha512-2OJlpeJpCR48CC8r+uKVChzs9Iungj9wkZrl8Z041DWEWvyIHILYKCPNzJghKsivj+S3mLo6BVc7mBNzdxA46w==
  dependencies:
    "@aws-sdk/types" "^3.222.0"
    "@aws-sdk/util-utf8-browser" "^3.0.0"
    tslib "^1.11.1"

"@aws-crypto/util@^5.2.0":
  version "5.2.0"
  resolved "https://registry.yarnpkg.com/@aws-crypto/util/-/util-5.2.0.tgz#71284c9cffe7927ddadac793c14f14886d3876da"
  integrity sha512-4RkU9EsI6ZpBve5fseQlGNUWKMa1RLPQ1dnjnQoe07ldfIzcsGb5hC5W0Dm7u423KWzawlrpbjXBrXCEv9zazQ==
  dependencies:
    "@aws-sdk/types" "^3.222.0"
    "@smithy/util-utf8" "^2.0.0"
    tslib "^2.6.2"

"@aws-sdk/types@^3.222.0":
  version "3.535.0"
  resolved "https://registry.yarnpkg.com/@aws-sdk/types/-/types-3.535.0.tgz#5e6479f31299dd9df170e63f4d10fe739008cf04"
  integrity sha512-aY4MYfduNj+sRR37U7XxYR8wemfbKP6lx00ze2M2uubn7mZotuVrWYAafbMSXrdEMSToE5JDhr28vArSOoLcSg==
  dependencies:
    "@smithy/types" "^2.12.0"
    tslib "^2.6.2"

"@aws-sdk/util-utf8-browser@^3.0.0":
  version "3.259.0"
  resolved "https://registry.yarnpkg.com/@aws-sdk/util-utf8-browser/-/util-utf8-browser-3.259.0.tgz#3275a6f5eb334f96ca76635b961d3c50259fd9ff"
  integrity sha512-UvFa/vR+e19XookZF8RzFZBrw2EUkQWxiBW0yYQAhvk3C+QVGl0H3ouca8LDBlBfQKXwmW3huo/59H8rwb1wJw==
  dependencies:
    tslib "^2.3.1"

"@azure/abort-controller@^1.0.0":
  version "1.1.0"
  resolved "https://registry.yarnpkg.com/@azure/abort-controller/-/abort-controller-1.1.0.tgz#788ee78457a55af8a1ad342acb182383d2119249"

@@ -404,6 +455,82 @@
  resolved "https://registry.yarnpkg.com/@sinonjs/text-encoding/-/text-encoding-0.7.2.tgz#5981a8db18b56ba38ef0efb7d995b12aa7b51918"
  integrity sha512-sXXKG+uL9IrKqViTtao2Ws6dy0znu9sOaP1di/jKGW1M6VssO8vlpXCQcpZ+jisQ1tTFAC5Jo/EOzFbggBagFQ==

"@smithy/eventstream-codec@^2.2.0":
  version "2.2.0"
  resolved "https://registry.yarnpkg.com/@smithy/eventstream-codec/-/eventstream-codec-2.2.0.tgz#63d74fa817188995eb55e792a38060b0ede98dc4"
  integrity sha512-8janZoJw85nJmQZc4L8TuePp2pk1nxLgkxIR0TUjKJ5Dkj5oelB9WtiSSGXCQvNsJl0VSTvK/2ueMXxvpa9GVw==
  dependencies:
    "@aws-crypto/crc32" "3.0.0"
    "@smithy/types" "^2.12.0"
    "@smithy/util-hex-encoding" "^2.2.0"
    tslib "^2.6.2"

"@smithy/is-array-buffer@^2.2.0":
  version "2.2.0"
  resolved "https://registry.yarnpkg.com/@smithy/is-array-buffer/-/is-array-buffer-2.2.0.tgz#f84f0d9f9a36601a9ca9381688bd1b726fd39111"
  integrity sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA==
  dependencies:
    tslib "^2.6.2"

"@smithy/signature-v4@^2.1.1":
  version "2.2.0"
  resolved "https://registry.yarnpkg.com/@smithy/signature-v4/-/signature-v4-2.2.0.tgz#8fe6a574188b71fba6056111b88d50c84babb060"
  integrity sha512-+B5TNzj/fRZzVW3z8UUJOkNx15+4E0CLuvJmJUA1JUIZFp3rdJ/M2H5r2SqltaVPXL0oIxv/6YK92T9TsFGbFg==
  dependencies:
    "@smithy/eventstream-codec" "^2.2.0"
    "@smithy/is-array-buffer" "^2.2.0"
    "@smithy/types" "^2.12.0"
    "@smithy/util-hex-encoding" "^2.2.0"
    "@smithy/util-middleware" "^2.2.0"
    "@smithy/util-uri-escape" "^2.2.0"
    "@smithy/util-utf8" "^2.3.0"
    tslib "^2.6.2"

"@smithy/types@^2.12.0":
  version "2.12.0"
  resolved "https://registry.yarnpkg.com/@smithy/types/-/types-2.12.0.tgz#c44845f8ba07e5e8c88eda5aed7e6a0c462da041"
  integrity sha512-QwYgloJ0sVNBeBuBs65cIkTbfzV/Q6ZNPCJ99EICFEdJYG50nGIY/uYXp+TbsdJReIuPr0a0kXmCvren3MbRRw==
  dependencies:
    tslib "^2.6.2"

"@smithy/util-buffer-from@^2.2.0":
  version "2.2.0"
  resolved "https://registry.yarnpkg.com/@smithy/util-buffer-from/-/util-buffer-from-2.2.0.tgz#6fc88585165ec73f8681d426d96de5d402021e4b"
  integrity sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA==
  dependencies:
    "@smithy/is-array-buffer" "^2.2.0"
    tslib "^2.6.2"

"@smithy/util-hex-encoding@^2.2.0":
  version "2.2.0"
  resolved "https://registry.yarnpkg.com/@smithy/util-hex-encoding/-/util-hex-encoding-2.2.0.tgz#87edb7c88c2f422cfca4bb21f1394ae9602c5085"
  integrity sha512-7iKXR+/4TpLK194pVjKiasIyqMtTYJsgKgM242Y9uzt5dhHnUDvMNb+3xIhRJ9QhvqGii/5cRUt4fJn3dtXNHQ==
  dependencies:
    tslib "^2.6.2"

"@smithy/util-middleware@^2.2.0":
  version "2.2.0"
  resolved "https://registry.yarnpkg.com/@smithy/util-middleware/-/util-middleware-2.2.0.tgz#80cfad40f6cca9ffe42a5899b5cb6abd53a50006"
  integrity sha512-L1qpleXf9QD6LwLCJ5jddGkgWyuSvWBkJwWAZ6kFkdifdso+sk3L3O1HdmPvCdnCK3IS4qWyPxev01QMnfHSBw==
  dependencies:
    "@smithy/types" "^2.12.0"
    tslib "^2.6.2"

"@smithy/util-uri-escape@^2.2.0":
  version "2.2.0"
  resolved "https://registry.yarnpkg.com/@smithy/util-uri-escape/-/util-uri-escape-2.2.0.tgz#56f5764051a33b67bc93fdd2a869f971b0635406"
  integrity sha512-jtmJMyt1xMD/d8OtbVJ2gFZOSKc+ueYJZPW20ULW1GOp/q/YIM0wNh+u8ZFao9UaIGz4WoPW8hC64qlWLIfoDA==
  dependencies:
    tslib "^2.6.2"

"@smithy/util-utf8@^2.0.0", "@smithy/util-utf8@^2.3.0":
  version "2.3.0"
  resolved "https://registry.yarnpkg.com/@smithy/util-utf8/-/util-utf8-2.3.0.tgz#dd96d7640363259924a214313c3cf16e7dd329c5"
  integrity sha512-R8Rdn8Hy72KKcebgLiv8jQcQkXoLMOGGv5uI1/k0l+snqkOzQ1R0ChUBCxWMlBsFMekWjq0wRudIweFs7sKT5A==
  dependencies:
    "@smithy/util-buffer-from" "^2.2.0"
    tslib "^2.6.2"

"@socket.io/component-emitter@~3.1.0":
  version "3.1.0"
  resolved "https://registry.yarnpkg.com/@socket.io/component-emitter/-/component-emitter-3.1.0.tgz#96116f2a912e0c02817345b3c10751069920d553"

@@ -794,9 +921,9 @@ arraybuffer.slice@~0.0.7:
  optionalDependencies:
    ioctl "^2.0.2"

-"arsenal@git+https://github.com/scality/arsenal#8.1.127":
-  version "8.1.127"
-  resolved "git+https://github.com/scality/arsenal#c2ab4a2052e46a19504ba7014d3c4030aa04aa41"
+"arsenal@git+https://github.com/scality/arsenal#77e9b92f3e775e39f5f903a00b702a86b2aa75a1":
+  version "8.1.128"
+  resolved "git+https://github.com/scality/arsenal#77e9b92f3e775e39f5f903a00b702a86b2aa75a1"
  dependencies:
    "@azure/identity" "^3.1.1"
    "@azure/storage-blob" "^12.12.0"

@@ -937,6 +1064,15 @@ axios@^0.18.0:
    follow-redirects "1.5.10"
    is-buffer "^2.0.2"

axios@^1.3.4:
  version "1.6.8"
  resolved "https://registry.yarnpkg.com/axios/-/axios-1.6.8.tgz#66d294951f5d988a00e87a0ffb955316a619ea66"
  integrity sha512-v/ZHtJDU39mDpyBoFVkETcd/uNdxrWRrg3bKpOKzXFA6Bvqopts6ALSMU3y6ijYxbw2B+wPrIv46egTzJXCLGQ==
  dependencies:
    follow-redirects "^1.15.6"
    form-data "^4.0.0"
    proxy-from-env "^1.1.0"

babel-code-frame@^6.26.0:
  version "6.26.0"
  resolved "https://registry.yarnpkg.com/babel-code-frame/-/babel-code-frame-6.26.0.tgz#63fd43f7dc1e3bb7ce35947db8fe369a3f58c74b"

@@ -2356,6 +2492,11 @@ follow-redirects@^1.0.0:
  resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.2.tgz#b460864144ba63f2681096f274c4e57026da2c13"
  integrity sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==

follow-redirects@^1.15.6:
  version "1.15.6"
  resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.6.tgz#7f815c0cda4249c74ff09e95ef97c23b5fd0399b"
  integrity sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==

for-each@^0.3.3:
  version "0.3.3"
  resolved "https://registry.yarnpkg.com/for-each/-/for-each-0.3.3.tgz#69b447e88a0a5d32c3e7084f3f1710034b21376e"

@@ -4674,6 +4815,11 @@ proxy-addr@~2.0.7:
    forwarded "0.2.0"
    ipaddr.js "1.9.1"

proxy-from-env@^1.1.0:
  version "1.1.0"
  resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2"
  integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==

prr@~0.0.0:
  version "0.0.0"
  resolved "https://registry.yarnpkg.com/prr/-/prr-0.0.0.tgz#1a84b85908325501411853d0081ee3fa86e2926a"

@@ -5030,6 +5176,14 @@ sax@>=0.6.0, sax@^1.2.4:
  resolved "https://registry.yarnpkg.com/sax/-/sax-1.2.4.tgz#2816234e2378bddc4e5354fab5caa895df7100d9"
  integrity sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==

"scubaclient@git+https://github.com/scality/scubaclient.git":
  version "1.0.0"
  resolved "git+https://github.com/scality/scubaclient.git#4b8584d02e8ac3cbffbf08e68715eac45182b5d0"
  dependencies:
    "@aws-crypto/sha256-js" "^5.2.0"
    "@smithy/signature-v4" "^2.1.1"
    axios "^1.3.4"

"semver@2 || 3 || 4 || 5", semver@^5.3.0, semver@^5.5.0, semver@^5.6.0:
  version "5.7.1"
  resolved "https://registry.yarnpkg.com/semver/-/semver-5.7.1.tgz#a954f931aeba508d307bbf069eff0c01c96116f7"

@@ -5653,11 +5807,21 @@ tsconfig-paths@^3.14.1:
    minimist "^1.2.6"
    strip-bom "^3.0.0"

tslib@^1.11.1:
  version "1.14.1"
  resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00"
  integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==

tslib@^2.2.0:
  version "2.5.2"
  resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.5.2.tgz#1b6f07185c881557b0ffa84b111a0106989e8338"
  integrity sha512-5svOrSA2w3iGFDs1HibEVBGbDrAY82bFQ3HZ3ixB+88nsbsWQoKqDRb5UBYAUPEzbBn6dAp5gRNXglySbx1MlA==

tslib@^2.3.1, tslib@^2.6.2:
  version "2.6.2"
  resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae"
  integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==

tunnel-agent@^0.6.0:
  version "0.6.0"
  resolved "https://registry.yarnpkg.com/tunnel-agent/-/tunnel-agent-0.6.0.tgz#27a5dea06b36b04a0a9966774b290868f0fc40fd"