Compare commits
9 Commits
developmen...improvemen
Author | SHA1 | Date
---|---|---
Jordi Bertran de Balanda | 9b92a50ca7 | 
Jordi Bertran de Balanda | a6230f5538 | 
Ronnie Smith | 3bfcf624bf | 
Jordi Bertran de Balanda | bd9209ef5e | 
Jordi Bertran de Balanda | 371cb689af | 
Xin LI | 674860ef8a | 
Xin LI | ce28e08d3e | 
Xin LI | 67df4fa207 | 
Xin LI | 4100ac73b2 | 
@@ -1 +1,6 @@
-{ "extends": "scality" }
+{
+"extends": "scality",
+"parserOptions": {
+"ecmaVersion": 2020
+}
+}
@@ -33,7 +33,7 @@ RUN cd /tmp \
&& rm -rf /tmp/Python-$PY_VERSION.tgz

RUN yarn cache clean \
&& yarn install --production --ignore-optional --ignore-engines --network-concurrency 1 \
&& apt-get autoremove --purge -y python git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& yarn cache clean \
@@ -27,7 +27,7 @@ if (config.backends.data === 'file' ||
dataServer.setup(err => {
if (err) {
logger.error('Error initializing REST data server',
{ error: err });
return;
}
dataServer.start();
128 lib/Config.js
@@ -41,10 +41,10 @@ function restEndpointsAssert(restEndpoints, locationConstraints) {
'bad config: restEndpoints must be an object of endpoints');
assert(Object.keys(restEndpoints).every(
r => typeof restEndpoints[r] === 'string'),
'bad config: each endpoint must be a string');
assert(Object.keys(restEndpoints).every(
r => typeof locationConstraints[restEndpoints[r]] === 'object'),
'bad config: rest endpoint target not in locationConstraints');
}

function gcpLocationConstraintAssert(location, locationObj) {
@@ -93,14 +93,14 @@ function gcpLocationConstraintAssert(location, locationObj) {
'serviceKey must be set in locationConfig or environment variable');
if (keyFilename) {
assert.strictEqual(typeof keyFilename, 'string',
`bad location constriant: "${location}" serviceCredentials ` +
`keyFilename "${keyFilename}" must be a string`);
} else {
assert.strictEqual(typeof serviceEmail, 'string',
`bad location constriant: "${location}" serviceCredentials ` +
`serviceEmail "${serviceEmail}" must be a string`);
assert.strictEqual(typeof serviceKey, 'string',
`bad location constriant: "${location}"" serviceCredentials ` +
`serviceKey "${serviceKey}" must be a string`);
}
}
@@ -161,17 +161,17 @@ function locationConstraintAssert(locationConstraints) {
`be one of ${supportedBackends}`);
assert(typeof locationConstraints[l].legacyAwsBehavior
=== 'boolean',
'bad config: locationConstraints[region]' +
'.legacyAwsBehavior is mandatory and must be a boolean');
if (locationConstraints[l].details.serverSideEncryption !== undefined) {
assert(typeof locationConstraints[l].details.serverSideEncryption
=== 'boolean',
'bad config: locationConstraints[region]' +
'.details.serverSideEncryption must be a boolean');
}
assert(typeof locationConstraints[l].details
=== 'object',
'bad config: locationConstraints[region].details is ' +
'mandatory and must be an object');
const details = locationConstraints[l].details;
const stringFields = [
@@ -265,7 +265,7 @@ function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) {
function requestsConfigAssert(requestsConfig) {
if (requestsConfig.viaProxy !== undefined) {
assert(typeof requestsConfig.viaProxy === 'boolean',
'config: invalid requests configuration. viaProxy must be a ' +
'boolean');

if (requestsConfig.viaProxy) {
@@ -328,7 +328,7 @@ class Config extends EventEmitter {
this.configPath = process.env.S3_CONFIG_FILE;
}
this.locationConfigPath = path.join(__dirname,
'../locationConfig.json');
if (process.env.CI === 'true' && !process.env.S3_END_TO_END) {
this.locationConfigPath = path.join(__dirname,
'../tests/locationConfig/locationConfigTests.json');
@@ -347,7 +347,7 @@ class Config extends EventEmitter {
let locationConfig;
try {
const data = fs.readFileSync(this.locationConfigPath,
{ encoding: 'utf-8' });
locationConfig = JSON.parse(data);
} catch (err) {
throw new Error(`could not parse location config file:
@@ -380,8 +380,8 @@ class Config extends EventEmitter {
'bad config: TLS file specification must be a string');
}
const tlsFilePath = (tlsFileName[0] === '/')
? tlsFileName
: path.join(this._basepath, tlsFileName);
let tlsFileContent;
try {
tlsFileContent = fs.readFileSync(tlsFilePath);
@@ -396,7 +396,7 @@ class Config extends EventEmitter {
let config;
try {
const data = fs.readFileSync(this.configPath,
{ encoding: 'utf-8' });
config = JSON.parse(data);
} catch (err) {
throw new Error(`could not parse config file: ${err.message}`);
@@ -413,7 +413,7 @@ class Config extends EventEmitter {
if (config.listenOn !== undefined) {
assert(Array.isArray(config.listenOn)
&& config.listenOn.every(e => typeof e === 'string'),
'bad config: listenOn must be a list of strings');
config.listenOn.forEach(item => {
const lastColon = item.lastIndexOf(':');
// if address is IPv6 format, it includes brackets
@@ -504,14 +504,14 @@ class Config extends EventEmitter {
if (config.websiteEndpoints !== undefined) {
assert(Array.isArray(config.websiteEndpoints)
&& config.websiteEndpoints.every(e => typeof e === 'string'),
'bad config: websiteEndpoints must be a list of strings');
this.websiteEndpoints = config.websiteEndpoints;
}

this.clusters = false;
if (config.clusters !== undefined) {
assert(Number.isInteger(config.clusters) && config.clusters > 0,
'bad config: clusters must be a positive integer');
this.clusters = config.clusters;
}

@@ -529,27 +529,27 @@ class Config extends EventEmitter {
if (config.cdmi !== undefined) {
if (config.cdmi.host !== undefined) {
assert.strictEqual(typeof config.cdmi.host, 'string',
'bad config: cdmi host must be a string');
this.cdmi.host = config.cdmi.host;
}
if (config.cdmi.port !== undefined) {
assert(Number.isInteger(config.cdmi.port)
&& config.cdmi.port > 0,
'bad config: cdmi port must be a positive integer');
this.cdmi.port = config.cdmi.port;
}
if (config.cdmi.path !== undefined) {
assert(typeof config.cdmi.path === 'string',
'bad config: cdmi.path must be a string');
assert(config.cdmi.path.length > 0,
'bad config: cdmi.path is empty');
assert(config.cdmi.path.charAt(0) === '/',
'bad config: cdmi.path should start with a "/"');
this.cdmi.path = config.cdmi.path;
}
if (config.cdmi.readonly !== undefined) {
assert(typeof config.cdmi.readonly === 'boolean',
'bad config: cdmi.readonly must be a boolean');
this.cdmi.readonly = config.cdmi.readonly;
} else {
this.cdmi.readonly = true;
@@ -562,7 +562,7 @@ class Config extends EventEmitter {
assert(config.bucketd.bootstrap instanceof Array
&& config.bucketd.bootstrap.every(
e => typeof e === 'string'),
'bad config: bucketd.bootstrap must be a list of strings');
this.bucketd.bootstrap = config.bucketd.bootstrap;
}

@@ -571,12 +571,12 @@ class Config extends EventEmitter {
if (config.vaultd.port !== undefined) {
assert(Number.isInteger(config.vaultd.port)
&& config.vaultd.port > 0,
'bad config: vaultd port must be a positive integer');
this.vaultd.port = config.vaultd.port;
}
if (config.vaultd.host !== undefined) {
assert.strictEqual(typeof config.vaultd.host, 'string',
'bad config: vaultd host must be a string');
this.vaultd.host = config.vaultd.host;
}
}
@@ -584,13 +584,13 @@ class Config extends EventEmitter {
if (config.dataClient) {
this.dataClient = {};
assert.strictEqual(typeof config.dataClient.host, 'string',
'bad config: data client host must be ' +
'a string');
this.dataClient.host = config.dataClient.host;

assert(Number.isInteger(config.dataClient.port)
&& config.dataClient.port > 0,
'bad config: dataClient port must be a positive ' +
'integer');
this.dataClient.port = config.dataClient.port;
}
@@ -604,7 +604,7 @@ class Config extends EventEmitter {

assert(Number.isInteger(config.metadataClient.port)
&& config.metadataClient.port > 0,
'bad config: metadata client port must be a ' +
'positive integer');
this.metadataClient.port = config.metadataClient.port;
}
@@ -618,7 +618,7 @@ class Config extends EventEmitter {

assert(Number.isInteger(config.dataDaemon.port)
&& config.dataDaemon.port > 0,
'bad config: data daemon port must be a positive ' +
'integer');
this.dataDaemon.port = config.dataDaemon.port;

@@ -629,7 +629,7 @@ class Config extends EventEmitter {
*/
this.dataDaemon.dataPath =
process.env.S3DATAPATH ?
process.env.S3DATAPATH : `${__dirname}/../localData`;
}

if (config.metadataDaemon) {
@@ -642,7 +642,7 @@ class Config extends EventEmitter {

assert(Number.isInteger(config.metadataDaemon.port)
&& config.metadataDaemon.port > 0,
'bad config: metadata daemon port must be a ' +
'positive integer');
this.metadataDaemon.port = config.metadataDaemon.port;

@@ -653,7 +653,7 @@ class Config extends EventEmitter {
*/
this.metadataDaemon.metadataPath =
process.env.S3METADATAPATH ?
process.env.S3METADATAPATH : `${__dirname}/../localMetadata`;

this.metadataDaemon.restEnabled =
config.metadataDaemon.restEnabled;
@@ -699,7 +699,7 @@ class Config extends EventEmitter {
this.redis.name = config.redis.name;
assert(Array.isArray(config.redis.sentinels) ||
typeof config.redis.sentinels === 'string',
'bad config: redis sentinels must be an array or string');

if (typeof config.redis.sentinels === 'string') {
config.redis.sentinels.split(',').forEach(item => {
@@ -720,7 +720,7 @@ class Config extends EventEmitter {

if (config.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(config.redis.sentinelPassword));
this.redis.sentinelPassword = config.redis.sentinelPassword;
}
} else {
@@ -751,14 +751,14 @@ class Config extends EventEmitter {
if (config.utapi.port) {
assert(Number.isInteger(config.utapi.port)
&& config.utapi.port > 0,
'bad config: utapi port must be a positive integer');
this.utapi.port = config.utapi.port;
}
if (utapiVersion === 1) {
if (config.utapi.workers !== undefined) {
assert(Number.isInteger(config.utapi.workers)
&& config.utapi.workers > 0,
'bad config: utapi workers must be a positive integer');
this.utapi.workers = config.utapi.workers;
}
// Utapi uses the same localCache config defined for S3 to avoid
@@ -804,7 +804,7 @@ class Config extends EventEmitter {
}
if (config.utapi.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(config.utapi.redis.sentinelPassword),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.sentinelPassword =
@@ -814,15 +814,15 @@ class Config extends EventEmitter {
if (config.utapi.redis.retry.connectBackoff !== undefined) {
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
assert.strictEqual(typeof min, 'number',
'utapi.redis.retry.connectBackoff: min must be a number');
assert.strictEqual(typeof max, 'number',
'utapi.redis.retry.connectBackoff: max must be a number');
assert.strictEqual(typeof jitter, 'number',
'utapi.redis.retry.connectBackoff: jitter must be a number');
assert.strictEqual(typeof factor, 'number',
'utapi.redis.retry.connectBackoff: factor must be a number');
assert.strictEqual(typeof deadline, 'number',
'utapi.redis.retry.connectBackoff: deadline must be a number');
}

this.utapi.redis.retry = config.utapi.redis.retry;
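For reference, the assertions above only accept a retry block whose five backoff fields are all numbers. A minimal sketch of the expected shape, written as a JavaScript object (the values are illustrative, not defaults taken from this change):

// Hypothetical utapi.redis.retry fragment that would pass the assertions above.
const utapiRedisRetry = {
    connectBackoff: { min: 10, max: 1000, jitter: 0.1, factor: 1.5, deadline: 10000 },
};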
@@ -924,8 +924,8 @@ class Config extends EventEmitter {
`bad config: utapi.filter.${state}.${field} must be an array of strings`);
utapiResourceFilters[field] = { [state]: new Set(resources) };
}
-}
-));
+},
+));
this.utapi.filter = utapiResourceFilters;
}
}
@@ -934,12 +934,12 @@ class Config extends EventEmitter {
if (config.log !== undefined) {
if (config.log.logLevel !== undefined) {
assert(typeof config.log.logLevel === 'string',
'bad config: log.logLevel must be a string');
this.log.logLevel = config.log.logLevel;
}
if (config.log.dumpLevel !== undefined) {
assert(typeof config.log.dumpLevel === 'string',
'bad config: log.dumpLevel must be a string');
this.log.dumpLevel = config.log.dumpLevel;
}
}
@@ -1008,8 +1008,8 @@ class Config extends EventEmitter {
cert: this._loadTlsFile(process.env.S3KMIP_CERT ||
undefined),
ca: (process.env.S3KMIP_CA
? process.env.S3KMIP_CA.split(',')
: []).map(this._loadTlsFile),
},
},
};
@@ -1044,12 +1044,12 @@ class Config extends EventEmitter {
}
if (port) {
assert(typeof port === 'number',
'bad config: KMIP TLS Port must be a number');
this.kmip.transport.tls.port = port;
}
if (host) {
assert(typeof host === 'string',
'bad config: KMIP TLS Host must be a string');
this.kmip.transport.tls.host = host;
}

@@ -1075,7 +1075,7 @@ class Config extends EventEmitter {
'be an array');
config.healthChecks.allowFrom.forEach(item => {
assert(typeof item === 'string',
'config: invalid healthcheck configuration. allowFrom IP ' +
'address must be a string');
});
this.healthChecks.allowFrom = defaultHealthChecks.allowFrom
@@ -1086,10 +1086,10 @@ class Config extends EventEmitter {
assert(typeof config.certFilePaths === 'object' &&
typeof config.certFilePaths.key === 'string' &&
typeof config.certFilePaths.cert === 'string' && ((
config.certFilePaths.ca &&
typeof config.certFilePaths.ca === 'string') ||
-!config.certFilePaths.ca)
-);
+!config.certFilePaths.ca),
+);
}
const { key, cert, ca } = config.certFilePaths ?
config.certFilePaths : {};
@@ -1102,14 +1102,14 @@ class Config extends EventEmitter {
capath = (ca[0] === '/') ? ca : `${this._basePath}/${ca}`;
assert.doesNotThrow(() =>
fs.accessSync(capath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${capath}`);
}
assert.doesNotThrow(() =>
fs.accessSync(keypath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${keypath}`);
assert.doesNotThrow(() =>
fs.accessSync(certpath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${certpath}`);
this.https = {
cert: fs.readFileSync(certpath, 'ascii'),
key: fs.readFileSync(keypath, 'ascii'),
@@ -1174,7 +1174,7 @@ class Config extends EventEmitter {
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
'bad environment variable: S3BACKEND environment variable ' +
-'should be one of mem/file/scality/cdmi'
+'should be one of mem/file/scality/cdmi',
);
auth = process.env.S3BACKEND;
data = process.env.S3BACKEND;
@@ -1195,8 +1195,8 @@ class Config extends EventEmitter {
if (process.env.SCALITY_ACCESS_KEY_ID &&
process.env.SCALITY_SECRET_ACCESS_KEY) {
authData = buildAuthDataAccount(
process.env.SCALITY_ACCESS_KEY_ID,
process.env.SCALITY_SECRET_ACCESS_KEY);
} else {
authData = require(authfile);
}
@@ -1209,7 +1209,7 @@ class Config extends EventEmitter {
const validData = ['mem', 'file', 'scality', 'multiple'];
assert(validData.indexOf(process.env.S3DATA) > -1,
'bad environment variable: S3DATA environment variable ' +
-'should be one of mem/file/scality/multiple'
+'should be one of mem/file/scality/multiple',
);
data = process.env.S3DATA;
}
@@ -1218,7 +1218,7 @@ class Config extends EventEmitter {
}
assert(this.locationConstraints !== undefined &&
this.restEndpoints !== undefined,
-'bad config: locationConstraints and restEndpoints must be set'
+'bad config: locationConstraints and restEndpoints must be set',
);

if (process.env.S3METADATA) {
@@ -1319,7 +1319,7 @@ class Config extends EventEmitter {

isAWSServerSideEncryption(locationConstraint) {
return this.locationConstraints[locationConstraint].details
.serverSideEncryption === true;
}

getGcpServiceParams(locationConstraint) {
@@ -167,8 +167,8 @@ const api = {
returnTagCount = checkedResults;
}
return tagConditionKeyAuth(authorizationResults, request, requestContexts, apiMethod, log,
(err, tagAuthResults, updatedContexts) =>
next(err, tagAuthResults, authorizationResults, userInfo, streamingV4Params, updatedContexts));
},
], (err, tagAuthResults, authorizationResults, userInfo, streamingV4Params, updatedContexts) => {
if (err) {
@@ -225,29 +225,29 @@ const api = {

// IAM policy -Tag condition keys require information from CloudServer for evaluation
return tagConditionKeyAuth(authorizationResults, request, (updatedContexts || requestContexts),
apiMethod, log, (err, tagAuthResults) => {
if (err) {
log.trace('tag authentication error', { error: err });
return callback(err);
}
if (tagAuthResults) {
const checkedResults = checkAuthResults(tagAuthResults);
if (checkedResults instanceof Error) {
return callback(checkedResults);
}
returnTagCount = checkedResults;
}
if (apiMethod === 'objectCopy' ||
apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, callback);
}
if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request,
returnTagCount, log, callback);
}
return this[apiMethod](userInfo, request, log, callback);
});
});
return undefined;
});
@@ -68,7 +68,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet';
const reqQuery = Object.assign({}, request.query,
{ versionId: sourceVersionId });
const getRequestContext = new RequestContext(request.headers,
@@ -96,7 +96,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
|| apiMethodAfterVersionCheck === 'objectGetVersion') {
const objectGetTaggingAction = (request.query &&
request.query.versionId) ? 'objectGetTaggingVersion' :
'objectGetTagging';
const getRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
const getTaggingRequestContext =
@@ -47,20 +47,20 @@ function updateRequestContexts(request, requestContexts, apiMethod, log, cb) {
}
const reqVersionId = decodedVidResult;
return metadata.getObjectMD(bucketName, objectKey, { versionId: reqVersionId }, log,
(err, objMD) => {
if (err) {
-if (err.NoSuchKey) {
+if (err.is.NoSuchKey) {
return next();
}
log.trace('error getting request object tags');
return next(err);
}
const existingTags = objMD.tags;
if (existingTags) {
rc.setExistingObjTag(makeTagQuery(existingTags));
}
return next();
});
},
], err => {
if (err) {
@@ -93,7 +93,7 @@ const _validator = {
validateNumberRules(length) {
if (length > 100) {
return errors.InvalidRequest
.customizeDescription(customizedErrs.numberRules);
}
return true;
},
@@ -323,20 +323,20 @@ function parseCorsXml(xml, log, cb) {
function convertToXml(arrayRules) {
const xml = [];
xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
'<CORSConfiguration>');
arrayRules.forEach(rule => {
xml.push('<CORSRule>');
['allowedMethods', 'allowedOrigins', 'allowedHeaders', 'exposeHeaders']
.forEach(key => {
if (rule[key]) {
const element = key.charAt(0).toUpperCase() +
key.slice(1, -1);
rule[key].forEach(value => {
xml.push(`<${element}>${escapeForXml(value)}` +
`</${element}>`);
});
}
});
if (rule.id) {
xml.push(`<ID>${escapeForXml(rule.id)}</ID>`);
}
@@ -22,7 +22,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {

// Get new format usersBucket to see if it exists
return metadata.getBucket(usersBucket, log, (err, usersBucketAttrs) => {
-if (err && !err.NoSuchBucket && !err.BucketAlreadyExists) {
+if (err && !err.is.NoSuchBucket && !err.is.BucketAlreadyExists) {
return cb(err);
}
const splitter = usersBucketAttrs ?
@@ -36,7 +36,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
usersBucket : oldUsersBucket;
return metadata.putObjectMD(usersBucketBeingCalled, key,
omVal, {}, log, err => {
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
// There must be no usersBucket so createBucket
// one using the new format
log.trace('users bucket does not exist, ' +
@@ -57,8 +57,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
// error with respect
// to the usersBucket.
if (err &&
-err !==
-errors.BucketAlreadyExists) {
+!err.is.BucketAlreadyExists) {
log.error('error from metadata', {
error: err,
});
@@ -206,7 +205,7 @@ function createBucket(authInfo, bucketName, headers,
},
getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) {
metadata.getBucket(bucketName, log, (err, data) => {
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
return callback(null, 'NoBucketYet');
}
if (err) {
@@ -241,7 +240,7 @@ function createBucket(authInfo, bucketName, headers,
'new bucket without flags; adding transient label');
newBucketMD.addTransientFlag();
return freshStartCreateBucket(newBucketMD, canonicalID,
log, cb);
});
}
if (existingBucketMD.hasTransientFlag() ||
@@ -16,7 +16,7 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
`${mpuBucketPrefix}${destinationBucketName}`;
return metadata.deleteBucket(mpuBucketName, log, err => {
// If the mpu bucket does not exist, just move on
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
return cb();
}
return cb(err);
@@ -90,7 +90,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
log, (err, objectsListRes) => {
// If no shadow bucket ever created, no ongoing MPU's, so
// continue with deletion
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
return next();
}
if (err) {
@@ -25,9 +25,9 @@ function bucketShield(bucket, requestType) {
invisiblyDelete(bucket.getName(), bucket.getOwner());
return true;
}
// If request is initiateMultipartUpload (requestType objectPut),
// objectPut, bucketPutACL or bucketDelete, proceed with request.
// Otherwise return an error to the client
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
(requestType !== 'objectPut' &&
requestType !== 'bucketPutACL' &&
@@ -385,25 +385,25 @@ function convertToXml(config) {
'"http://s3.amazonaws.com/doc/2006-03-01/">');
if (indexDocument) {
xml.push('<IndexDocument>',
`<Suffix>${escapeForXml(indexDocument)}</Suffix>`,
'</IndexDocument>');
}
if (errorDocument) {
xml.push('<ErrorDocument>',
`<Key>${escapeForXml(errorDocument)}</Key>`,
'</ErrorDocument>');
}
if (redirectAllRequestsTo) {
xml.push('<RedirectAllRequestsTo>');
if (redirectAllRequestsTo.hostName) {
xml.push('<HostName>',
`${escapeForXml(redirectAllRequestsTo.hostName)}`,
'</HostName>');
}
if (redirectAllRequestsTo.protocol) {
xml.push('<Protocol>',
`${redirectAllRequestsTo.protocol}`,
'</Protocol>');
}
xml.push('</RedirectAllRequestsTo>');
}
@@ -11,22 +11,22 @@ function deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
// If the object representing the bucket is not in the
// users bucket just continue
-if (error && error.NoSuchKey) {
+if (error?.is.NoSuchKey) {
return cb(null);
// BACKWARDS COMPATIBILITY: Remove this once no longer
// have old user bucket format
-} else if (error && error.NoSuchBucket) {
+} else if (error?.is.NoSuchBucket) {
const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
oldSplitter, bucketName);
return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
{}, log, error => {
-if (error && !error.NoSuchKey) {
+if (error && !error.is.NoSuchKey) {
log.error('from metadata while deleting user bucket',
{ error });
return cb(error);
}
log.trace('deleted bucket from user bucket',
{ method: '_deleteUserBucketEntry' });
return cb(null);
});
} else if (error) {
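The `error.NoSuchKey` to `error.is.NoSuchKey` edits that recur throughout these hunks follow Arsenal's newer error API, where each error object exposes an `is` map of type flags. A rough sketch of the two styles being swapped (illustrative only, not code from this branch):

// older style: a boolean property per error type on the error object
if (error && error.NoSuchKey) { /* treat as missing key */ }

// newer style used here: check the `is` map, optionally with optional chaining
if (error?.is.NoSuchKey) { /* treat as missing key */ }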
@@ -16,19 +16,19 @@ function invisiblyDelete(bucketName, canonicalID) {
return deleteUserBucketEntry(bucketName, canonicalID, log, err => {
if (err) {
log.error('error invisibly deleting bucket name from user bucket',
{ error: err });
return log.end();
}
log.trace('deleted bucket name from user bucket');
return metadata.deleteBucket(bucketName, log, error => {
log.trace('deleting bucket from metadata',
{ method: 'invisiblyDelete' });
if (error) {
log.error('error deleting bucket from metadata', { error });
return log.end();
}
log.trace('invisible deletion of bucket succeeded',
{ method: 'invisiblyDelete' });
return log.end();
});
});
@@ -51,7 +51,7 @@ class BackendInfo {
static isRequestEndpointPresent(requestEndpoint, log) {
if (Object.keys(config.restEndpoints).indexOf(requestEndpoint) < 0) {
log.trace('requestEndpoint does not match config restEndpoints',
{ requestEndpoint });
return false;
}
return true;
@@ -69,7 +69,7 @@ class BackendInfo {
.restEndpoints[requestEndpoint]) < 0) {
log.trace('the default locationConstraint for request' +
'Endpoint does not match any config locationConstraint',
{ requestEndpoint });
return false;
}
return true;
@@ -152,7 +152,7 @@ class BackendInfo {
return { isValid: true, legacyLocationConstraint };
}
if (!BackendInfo.isValidRequestEndpointOrBackend(requestEndpoint,
log)) {
return { isValid: false, description: 'Endpoint Location Error - ' +
`Your endpoint "${requestEndpoint}" is not in restEndpoints ` +
'in your config OR the default location constraint for request ' +
@@ -56,7 +56,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
});
},
function ifMultipleBackend(mpuBucket, mpuOverviewObj, destBucket,
next) {
if (config.backends.data === 'multiple') {
let location;
// if controlling location constraint is not stored in object
@@ -75,13 +75,13 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
location = mpuOverviewObj.controllingLocationConstraint;
}
return multipleBackendGateway.abortMPU(objectKey, uploadId,
location, bucketName, log, (err, skipDataDelete) => {
if (err) {
return next(err, destBucket);
}
return next(null, mpuBucket, destBucket,
skipDataDelete);
});
}
return next(null, mpuBucket, destBucket, false);
},
@@ -95,7 +95,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
});
},
function getPartLocations(mpuBucket, destBucket, skipDataDelete,
next) {
services.getMPUparts(mpuBucket.getName(), uploadId, log,
(err, result) => {
if (err) {
@@ -103,11 +103,11 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
}
const storedParts = result.Contents;
return next(null, mpuBucket, storedParts, destBucket,
skipDataDelete);
});
},
function deleteData(mpuBucket, storedParts, destBucket,
skipDataDelete, next) {
// for Azure we do not need to delete data
if (skipDataDelete) {
return next(null, mpuBucket, storedParts, destBucket);
@@ -100,7 +100,7 @@ function findCorsRule(rules, origin, method, headers) {
* @return {object} resHeaders - headers to include in response
*/
function generateCorsResHeaders(rule, origin, method, headers,
isPreflight) {
const resHeaders = {
'access-control-max-age': rule.maxAgeSeconds,
'access-control-allow-methods': rule.allowedMethods.join(', '),
@@ -84,8 +84,8 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
* result.versionId - unencrypted versionId returned by metadata
*/
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
log, callback) {
const size = isDeleteMarker ? 0 : request.parsedContentLength;
// although the request method may actually be 'DELETE' if creating a
// delete marker, for our purposes we consider this to be a 'PUT'
@@ -187,10 +187,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,

if (isVersionedObj) {
log.debug(externalVersioningErrorMessage,
{ method: 'createAndStoreObject', error: errors.NotImplemented });
return process.nextTick(() => {
callback(errors.NotImplemented.customizeDescription(
externalVersioningErrorMessage));
});
}
}
@@ -208,7 +208,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
return next(null, null, null);
}
return dataStore(objectKeyContext, cipherBundle, request, size,
streamingV4Params, backendInfo, log, next);
},
function processDataResult(dataGetInfo, calculatedHash, next) {
if (dataGetInfo === null || dataGetInfo === undefined) {
@@ -220,8 +220,8 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
const { key, dataStoreName, dataStoreType, dataStoreETag,
dataStoreVersionId } = dataGetInfo;
const prefixedDataStoreETag = dataStoreETag
? `1:${dataStoreETag}`
: `1:${calculatedHash}`;
const dataGetInfoArr = [{ key, size, start: 0, dataStoreName,
dataStoreType, dataStoreETag: prefixedDataStoreETag,
dataStoreVersionId }];
@@ -239,7 +239,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
if (err) {
// TODO: check AWS error when user requested a specific
// version before any versions have been put
-const logLvl = err === errors.BadRequest ?
+const logLvl = err.is.BadRequest ?
'debug' : 'error';
log[logLvl]('error getting versioning info', {
error: err,
@@ -80,7 +80,7 @@ function _generateExpHeadresMPU(rules, params, datetime) {
const date = calculateDate(
params.date,
rule.DaysAfterInitiation,
-datetime
+datetime,
);

return {
@@ -24,7 +24,7 @@ function locationConstraintCheck(request, metaHeaders, bucket, log) {
metaHeaders[constants.objectLocationConstraintHeader];
} else {
objectLocationConstraint = request
.headers[constants.objectLocationConstraintHeader];
}
const bucketLocationConstraint = bucket.getLocationConstraint();
const requestEndpoint = request.parsedHost;
@@ -35,7 +35,7 @@ function locationConstraintCheck(request, metaHeaders, bucket, log) {
if (!controllingBackend.isValid) {
backendInfoObj = {
err: errors.InvalidArgument.customizeDescription(controllingBackend.
description),
};
return backendInfoObj;
}
@@ -0,0 +1,48 @@
+const { errors } = require('arsenal');
+
+const { config } = require('../../../Config');
+const { getLocationMetric, pushLocationMetric } =
+require('../../../utapi/utilities');
+
+function _gbToBytes(gb) {
+return gb * 1024 * 1024 * 1024;
+}
+
+/**
+* locationStorageCheck - will ensure there is enough space left for object on
+* PUT operations, or will update metric on DELETE
+* NOTE: storage limit may not be exactly enforced in the case of concurrent
+* requests when near limit
+* @param {string} location - name of location to check quota
+* @param {number} updateSize - new size to check against quota in bytes
+* @param {object} log - werelogs logger
+* @param {function} cb - callback function
+* @return {undefined}
+*/
+function locationStorageCheck(location, updateSize, log, cb) {
+const lc = config.locationConstraints;
+const sizeLimitGB = lc[location] ? lc[location].sizeLimitGB : undefined;
+if (updateSize === 0 || sizeLimitGB === undefined || sizeLimitGB === null) {
+return cb();
+}
+// no need to list location metric, since it should be decreased
+if (updateSize < 0) {
+return pushLocationMetric(location, updateSize, log, cb);
+}
+return getLocationMetric(location, log, (err, bytesStored) => {
+if (err) {
+log.error(`Error listing metrics from Utapi: ${err.message}`);
+return cb(err);
+}
+const newStorageSize = parseInt(bytesStored, 10) + updateSize;
+const sizeLimitBytes = _gbToBytes(sizeLimitGB);
+if (sizeLimitBytes < newStorageSize) {
+return cb(errors.AccessDenied.customizeDescription(
+`The assigned storage space limit for location ${location} ` +
+'will be exceeded'));
+}
+return pushLocationMetric(location, updateSize, log, cb);
+});
+}
+
+module.exports = locationStorageCheck;
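A minimal usage sketch of this new helper, assuming a werelogs logger and a request callback are already in scope (the location name and object size below are made up for illustration):

const locationStorageCheck = require('./locationStorageCheck');

// Before accepting a 5 MiB PUT against a quota-limited location:
locationStorageCheck('quota-limited-location', 5 * 1024 * 1024, log, err => {
    if (err) {
        // AccessDenied when the configured sizeLimitGB would be exceeded
        return callback(err);
    }
    return callback();
});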
@@ -43,7 +43,7 @@ function validateHeaders(bucket, headers, log) {
!(objectLockMode && objectLockDate)) {
return errors.InvalidArgument.customizeDescription(
'x-amz-object-lock-retain-until-date and ' +
-'x-amz-object-lock-mode must both be supplied'
+'x-amz-object-lock-mode must both be supplied',
);
}
const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);
@@ -112,7 +112,7 @@ function generateMpuPartStorageInfo(filteredPartList) {
* and extraPartLocations
*/
function validateAndFilterMpuParts(storedParts, jsonList, mpuOverviewKey,
splitter, log) {
let storedPartsCopy = [];
const filteredPartsObj = {};
filteredPartsObj.partList = [];
@@ -2,7 +2,7 @@ const { errors } = require('arsenal');
const {
parseRangeSpec,
parseRange,
-} = require('arsenal/lib/network/http/utils');
+} = require('arsenal').network.http.utils;

const constants = require('../../../../constants');
const setPartRanges = require('./setPartRanges');
@@ -43,7 +43,7 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
// md-model-version 2, need to handle cases where
// objMD.location is just a string
dataLocator = Array.isArray(sourceObjMD.location) ?
sourceObjMD.location : [{ key: sourceObjMD.location }];
}

if (sourceObjMD['x-amz-server-side-encryption']) {
@@ -76,7 +76,7 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
log.trace('data model before version 2 so ' +
'cannot support get range copy part');
return { error: errors.NotImplemented
.customizeDescription('Stored object ' +
'has legacy data storage model so does' +
' not support range headers on copy part'),
};
@@ -81,7 +81,7 @@ function dataStore(objectContext, cipherBundle, stream, size,
dataRetrievalInfo,
});
return checkHashMatchMD5(stream, hashedStream,
dataRetrievalInfo, log, cbOnce);
});
}

@@ -44,7 +44,7 @@ function getVersionIdResHeader(verCfg, objectMD) {
return 'null';
}
return versionIdUtils.encode(objectMD.versionId,
config.versionIdEncodingType);
}
return undefined;
}
@@ -66,7 +66,7 @@ function _storeNullVersionMD(bucketName, objKey, objMD, options, log, cb) {
metadata.putObjectMD(bucketName, objKey, objMD, options, log, err => {
if (err) {
log.debug('error from metadata storing null version as new version',
{ error: err });
}
cb(err, options);
});
@@ -121,7 +121,7 @@ function _deleteNullVersionMD(bucketName, objKey, options, mst, log, cb) {
err => {
if (err) {
log.warn('metadata error deleting null version',
{ error: err, method: '_deleteNullVersionMD' });
return cb(err);
}
return cb(null, nullDataToDelete);
@@ -292,7 +292,7 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
// it's possible there was a concurrent request to
// delete the null version, so proceed with putting a
// new version
-if (err === errors.NoSuchKey) {
+if (err.is.NoSuchKey) {
return next(null, options);
}
return next(errors.InternalError);
@@ -113,7 +113,7 @@ function processVersions(bucketName, listParams, list, encType) {
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
-'<Name>', bucketName, '</Name>'
+'<Name>', bucketName, '</Name>',
);
const isTruncated = list.IsTruncated ? 'true' : 'false';
const xmlParams = [
@@ -160,7 +160,7 @@ function processVersions(bucketName, listParams, list, encType) {
`<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
'</Owner>',
`<StorageClass>${v.StorageClass}</StorageClass>`,
-v.IsDeleteMarker ? '</DeleteMarker>' : '</Version>'
+v.IsDeleteMarker ? '</DeleteMarker>' : '</Version>',
);
});
list.CommonPrefixes.forEach(item => {
@@ -176,7 +176,7 @@ function processMasterVersions(bucketName, listParams, list) {
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
-'<Name>', bucketName, '</Name>'
+'<Name>', bucketName, '</Name>',
);
const isTruncated = list.IsTruncated ? 'true' : 'false';
const xmlParams = [
@@ -234,19 +234,19 @@ function processMasterVersions(bucketName, listParams, list) {
`<Key>${objectKey}</Key>`,
`<LastModified>${v.LastModified}</LastModified>`,
`<ETag>"${v.ETag}"</ETag>`,
-`<Size>${v.Size}</Size>`
+`<Size>${v.Size}</Size>`,
);
if (!listParams.v2 || listParams.fetchOwner) {
xml.push(
'<Owner>',
`<ID>${v.Owner.ID}</ID>`,
`<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
-'</Owner>'
+'</Owner>',
);
}
return xml.push(
`<StorageClass>${v.StorageClass}</StorageClass>`,
-'</Contents>'
+'</Contents>',
);
});
list.CommonPrefixes.forEach(item => {
@@ -266,7 +266,7 @@ function handleResult(listParams, requestMaxKeys, encoding, authInfo,
let res;
if (listParams.listingType === 'DelimiterVersions') {
res = processVersions(bucketName, listParams, list,
config.versionIdEncodingType);
} else {
res = processMasterVersions(bucketName, listParams, list);
}
@@ -364,14 +364,14 @@ function bucketGet(authInfo, request, log, callback) {
bucketName, emptyList, corsHeaders, log, callback);
}
return services.getObjectListing(bucketName, listParams, log,
(err, list) => {
if (err) {
log.debug('error processing request', { error: err });
return callback(err, null, corsHeaders);
}
return handleResult(listParams, requestMaxKeys, encoding, authInfo,
bucketName, list, corsHeaders, log, callback);
});
});
return undefined;
}
@@ -67,7 +67,7 @@ function bucketGetEncryption(authInfo, request, log, callback) {
'</ApplyServerSideEncryptionByDefault>',
'<BucketKeyEnabled>false</BucketKeyEnabled>',
'</Rule>',
-'</ServerSideEncryptionConfiguration>'
+'</ServerSideEncryptionConfiguration>',
);

pushMetric('getBucketEncryption', log, {
@@ -46,10 +46,10 @@ function bucketGetLocation(authInfo, request, log, callback) {

let locationConstraint = bucket.getLocationConstraint();
if (!locationConstraint || locationConstraint === 'us-east-1') {
// AWS returns empty string if no region has been
// provided or for us-east-1
// Note: AWS JS SDK sends a request with locationConstraint us-east-1
// if no locationConstraint provided.
locationConstraint = '';
}
const xml = `<?xml version="1.0" encoding="UTF-8"?>
@@ -19,8 +19,8 @@ function convertToXml(versioningConfiguration) {
const xml = [];

xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
'<VersioningConfiguration ' +
-'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
+'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
);

if (versioningConfiguration && versioningConfiguration.Status) {
@@ -50,9 +50,9 @@ function checkLocationConstraint(request, locationConstraint, log) {
`${locationConstraintChecked} - is not listed in the ` +
'locationConstraint config';
log.trace(`locationConstraint is invalid - ${errMsg}`,
{ locationConstraint: locationConstraintChecked });
return { error: errors.InvalidLocationConstraint.
customizeDescription(errMsg) };
}
return { error: null, locationConstraint: locationConstraintChecked };
}
@@ -80,7 +80,7 @@ function _parseXML(request, log, cb) {
log.trace('location constraint',
{ locationConstraint });
const locationCheck = checkLocationConstraint(request,
locationConstraint, log);
if (locationCheck.error) {
return cb(locationCheck.error);
}
@@ -89,7 +89,7 @@ function _parseXML(request, log, cb) {
}
return process.nextTick(() => {
const locationCheck = checkLocationConstraint(request,
undefined, log);
if (locationCheck.error) {
return cb(locationCheck.error);
}
@@ -167,20 +167,20 @@ function bucketPut(authInfo, request, log, callback) {
return next(null, locationConstraint);
},
(locationConstraint, next) => createBucket(authInfo, bucketName,
request.headers, locationConstraint, log, (err, previousBucket) => {
// if bucket already existed, gather any relevant cors
// headers
const corsHeaders = collectCorsHeaders(
request.headers.origin, request.method, previousBucket);
if (err) {
return next(err, corsHeaders);
}
pushMetric('createBucket', log, {
authInfo,
bucket: bucketName,
});
return next(null, corsHeaders);
}),
], callback);
}

@@ -103,16 +103,16 @@ function bucketPutACL(authInfo, request, log, callback) {
return async.waterfall([
function waterfall1(next) {
metadataValidateBucket(metadataValParams, log,
(err, bucket) => {
if (err) {
log.trace('request authorization failed', {
error: err,
method: 'metadataValidateBucket',
});
return next(err, bucket);
}
return next(null, bucket);
});
},
function waterfall2(bucket, next) {
// If not setting acl through headers, parse body
@@ -179,7 +179,7 @@ function bucketPutACL(authInfo, request, log, callback) {
if (!skip && granteeType === 'Group') {
if (possibleGroups.indexOf(grantee.URI[0]) < 0) {
log.trace('invalid user group',
{ userGroup: grantee.URI[0] });
return next(errors.InvalidArgument, bucket);
}
return usersIdentifiedByGroup.push({
@@ -195,15 +195,15 @@ function bucketPutACL(authInfo, request, log, callback) {
// through the access headers
const allGrantHeaders =
[].concat(grantReadHeader, grantWriteHeader,
grantReadACPHeader, grantWriteACPHeader,
grantFullControlHeader);

usersIdentifiedByEmail = allGrantHeaders.filter(item =>
item && item.userIDType.toLowerCase() === 'emailaddress');

usersIdentifiedByGroup = allGrantHeaders
.filter(itm => itm && itm.userIDType
.toLowerCase() === 'uri');
for (let i = 0; i < usersIdentifiedByGroup.length; i++) {
const userGroup = usersIdentifiedByGroup[i].identifier;
if (possibleGroups.indexOf(userGroup) < 0) {
@@ -27,7 +27,7 @@ function bucketPutCors(authInfo, request, log, callback) {

if (!request.post) {
log.debug('CORS xml body is missing',
{ error: errors.MissingRequestBodyError });
return callback(errors.MissingRequestBodyError);
}

@@ -105,7 +105,7 @@ function bucketPutVersioning(authInfo, request, log, callback) {
if (!_checkBackendVersioningImplemented(bucket)) {
log.debug(externalVersioningErrorMessage,
{ method: 'bucketPutVersioning',
error: errors.NotImplemented });
const error = errors.NotImplemented.customizeDescription(
externalVersioningErrorMessage);
return next(error, bucket);
@@ -50,7 +50,7 @@ const REPLICATION_ACTION = 'MPU';
 */

/*
   Format of xml response:
   <?xml version='1.0' encoding='UTF-8'?>
   <CompleteMultipartUploadResult
@@ -137,7 +137,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
            });
        },
        function parsePartsList(destBucket, objMD, mpuBucket,
            storedMetadata, next) {
            const location = storedMetadata.controllingLocationConstraint;
            // BACKWARD: Remove to remove the old splitter
            if (mpuBucket.getMdBucketModelVersion() < 2) {
@@ -159,7 +159,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
            return next(errors.MalformedXML, destBucket);
        },
        function markOverviewForCompletion(destBucket, objMD, mpuBucket, jsonList,
            storedMetadata, location, mpuOverviewKey, next) {
            return services.metadataMarkMPObjectForCompletion({
                bucketName: mpuBucket.getName(),
                objectKey,
@@ -171,11 +171,11 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                    return next(err);
                }
                return next(null, destBucket, objMD, mpuBucket,
                    jsonList, storedMetadata, location, mpuOverviewKey);
            });
        },
        function retrieveParts(destBucket, objMD, mpuBucket, jsonList,
            storedMetadata, location, mpuOverviewKey, next) {
            return services.getMPUparts(mpuBucket.getName(), uploadId, log,
                (err, result) => {
                    if (err) {
@@ -187,7 +187,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                });
        },
        function ifMultipleBackend(destBucket, objMD, mpuBucket, storedParts,
            jsonList, storedMetadata, location, mpuOverviewKey, next) {
            if (config.backends.data === 'multiple') {
                // if mpu was initiated in legacy version
                if (location === undefined) {
@@ -203,26 +203,26 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                }
                const mdInfo = { storedParts, mpuOverviewKey, splitter };
                return multipleBackendGateway.completeMPU(objectKey,
                    uploadId, location, jsonList, mdInfo, bucketName, null, null,
                    log, (err, completeObjData) => {
                        if (err) {
                            return next(err, destBucket);
                        }
                        return next(null, destBucket, objMD, mpuBucket, storedParts,
                            jsonList, storedMetadata, completeObjData,
                            mpuOverviewKey);
                    });
            }
            return next(null, destBucket, objMD, mpuBucket, storedParts,
                jsonList, storedMetadata, null, mpuOverviewKey);
        },
        function validateAndFilterParts(destBucket, objMD, mpuBucket,
            storedParts, jsonList, storedMetadata, completeObjData, mpuOverviewKey,
            next) {
            if (completeObjData) {
                return next(null, destBucket, objMD, mpuBucket, storedParts,
                    jsonList, storedMetadata, completeObjData, mpuOverviewKey,
                    completeObjData.filteredPartsObj);
            }
            const filteredPartsObj = validateAndFilterMpuParts(storedParts,
                jsonList, mpuOverviewKey, splitter, log);
@@ -234,8 +234,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                filteredPartsObj);
        },
        function processParts(destBucket, objMD, mpuBucket, storedParts,
            jsonList, storedMetadata, completeObjData, mpuOverviewKey,
            filteredPartsObj, next) {
            // if mpu was completed on backend that stored mpu MD externally,
            // skip MD processing steps
            if (completeObjData && skipMpuPartProcessing(completeObjData)) {
@@ -341,7 +341,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
                if (err) {
                    // TODO: check AWS error when user requested a specific
                    // version before any versions have been put
-                    const logLvl = err === errors.BadRequest ?
+                    const logLvl = err.is.BadRequest ?
                        'debug' : 'error';
                    log[logLvl]('error getting versioning info', {
                        error: err,
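The hunk above replaces a strict identity check against the shared errors.BadRequest object with the err.is.BadRequest predicate. A minimal sketch of that pattern, assuming arsenal-style errors expose a boolean map under `is`; the makeError helper below is a hypothetical stand-in for illustration, not the arsenal implementation:

const assert = require('assert');

// hypothetical stand-in: one truthy flag per error code on an `is` map
function makeError(code) {
    return { code, is: { [code]: true } };
}

const err = makeError('BadRequest');
assert(err.is.BadRequest);        // type check no longer relies on object identity
assert(!err.is.InternalError);    // other codes are simply absent, hence falsy
assert(err?.is.BadRequest);       // optional chaining also guards a null err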
@@ -428,10 +428,10 @@ function completeMultipartUpload(authInfo, request, log, callback) {
            if (dataToDelete) {
                const newDataStoreName =
                    Array.isArray(dataLocations) && dataLocations[0] ?
                        dataLocations[0].dataStoreName : null;
                const delLog =
                    logger.newRequestLoggerFromSerializedUids(log
                        .getSerializedUids());
                return data.batchDelete(dataToDelete,
                    request.method,
                    newDataStoreName, delLog, err => {
@@ -481,7 +481,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
        if (generatedVersionId) {
            corsHeaders['x-amz-version-id'] =
                versionIdUtils.encode(generatedVersionId,
                    config.versionIdEncodingType);
        }
        Object.assign(responseHeaders, corsHeaders);

@@ -132,7 +132,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
    }

    function _getMPUBucket(destinationBucket, log, corsHeaders,
        uploadId, cipherBundle, locConstraint, callback) {
        const xmlParams = {
            bucketName,
            objectKey,
@ -228,45 +228,45 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
|
|||
let uploadId;
|
||||
if (config.backends.data === 'multiple') {
|
||||
return multipleBackendGateway.createMPU(objectKey, metaHeaders,
|
||||
bucketName, websiteRedirectHeader, locConstraint, undefined,
|
||||
undefined, undefined, undefined, tagging, log,
|
||||
(err, dataBackendResObj) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
if (locConstraint &&
|
||||
bucketName, websiteRedirectHeader, locConstraint, undefined,
|
||||
undefined, undefined, undefined, tagging, log,
|
||||
(err, dataBackendResObj) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
if (locConstraint &&
|
||||
config.locationConstraints[locConstraint] &&
|
||||
config.locationConstraints[locConstraint].type &&
|
||||
constants.versioningNotImplBackends[config
|
||||
.locationConstraints[locConstraint].type]
|
||||
) {
|
||||
const vcfg = destinationBucket.getVersioningConfiguration();
|
||||
const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
|
||||
if (isVersionedObj) {
|
||||
log.debug(externalVersioningErrorMessage,
|
||||
{ method: 'initiateMultipartUpload',
|
||||
error: errors.NotImplemented });
|
||||
return callback(errors.NotImplemented
|
||||
.customizeDescription(externalVersioningErrorMessage));
|
||||
) {
|
||||
const vcfg = destinationBucket.getVersioningConfiguration();
|
||||
const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
|
||||
if (isVersionedObj) {
|
||||
log.debug(externalVersioningErrorMessage,
|
||||
{ method: 'initiateMultipartUpload',
|
||||
error: errors.NotImplemented });
|
||||
return callback(errors.NotImplemented
|
||||
.customizeDescription(externalVersioningErrorMessage));
|
||||
}
|
||||
}
|
||||
}
|
||||
if (dataBackendResObj) {
|
||||
if (dataBackendResObj) {
|
||||
// dataBackendResObj will be returned in data backend
|
||||
// handles mpu
|
||||
uploadId = dataBackendResObj.UploadId;
|
||||
} else {
|
||||
uploadId = uuidv4().replace(/-/g, '');
|
||||
}
|
||||
uploadId = dataBackendResObj.UploadId;
|
||||
} else {
|
||||
uploadId = uuidv4().replace(/-/g, '');
|
||||
}
|
||||
|
||||
return _getMPUBucket(destinationBucket, log, corsHeaders,
|
||||
uploadId, cipherBundle, locConstraint, callback);
|
||||
});
|
||||
return _getMPUBucket(destinationBucket, log, corsHeaders,
|
||||
uploadId, cipherBundle, locConstraint, callback);
|
||||
});
|
||||
}
|
||||
// Generate uniqueID without dashes so that routing not messed up
|
||||
uploadId = uuidv4().replace(/-/g, '');
|
||||
|
||||
return _getMPUBucket(destinationBucket, log, corsHeaders,
|
||||
uploadId, cipherBundle, locConstraint, callback);
|
||||
uploadId, cipherBundle, locConstraint, callback);
|
||||
}
|
||||
|
||||
async.waterfall([
|
||||
|
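The uploadId generation shown in the hunk above strips the dashes from a v4 UUID so routing sees a single 32-character hex token. A small sketch, assuming the `uuid` package that the handler's `uuidv4` call suggests:

const { v4: uuidv4 } = require('uuid');

// Generate a unique ID without dashes so that routing is not messed up
const uploadId = uuidv4().replace(/-/g, '');
console.log(uploadId.length); // 32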
@ -291,24 +291,24 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
|
|||
if (destinationBucket.hasTransientFlag() || destinationBucket.hasDeletedFlag()) {
|
||||
log.trace('transient or deleted flag so cleaning up bucket');
|
||||
return cleanUpBucket(
|
||||
destinationBucket,
|
||||
accountCanonicalID,
|
||||
log,
|
||||
error => {
|
||||
if (error) {
|
||||
log.debug('error cleaning up bucket with flag',
|
||||
{
|
||||
error,
|
||||
transientFlag: destinationBucket.hasTransientFlag(),
|
||||
deletedFlag: destinationBucket.hasDeletedFlag(),
|
||||
});
|
||||
// To avoid confusing user with error
|
||||
// from cleaning up
|
||||
// bucket return InternalError
|
||||
return next(errors.InternalError, corsHeaders);
|
||||
}
|
||||
return next(null, corsHeaders, destinationBucket);
|
||||
});
|
||||
destinationBucket,
|
||||
accountCanonicalID,
|
||||
log,
|
||||
error => {
|
||||
if (error) {
|
||||
log.debug('error cleaning up bucket with flag',
|
||||
{
|
||||
error,
|
||||
transientFlag: destinationBucket.hasTransientFlag(),
|
||||
deletedFlag: destinationBucket.hasDeletedFlag(),
|
||||
});
|
||||
// To avoid confusing user with error
|
||||
// from cleaning up
|
||||
// bucket return InternalError
|
||||
return next(errors.InternalError, corsHeaders);
|
||||
}
|
||||
return next(null, corsHeaders, destinationBucket);
|
||||
});
|
||||
}
|
||||
return next(null, corsHeaders, destinationBucket);
|
||||
},
|
||||
|
@ -326,15 +326,15 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
|
|||
return next(error, corsHeaders);
|
||||
}
|
||||
return next(null, corsHeaders, destinationBucket, objectSSEConfig);
|
||||
}
|
||||
},
|
||||
),
|
||||
],
|
||||
(error, corsHeaders, destinationBucket, objectSSEConfig) => {
|
||||
if (error) {
|
||||
return callback(error, null, corsHeaders);
|
||||
}
|
||||
return _storetheMPObject(destinationBucket, corsHeaders, objectSSEConfig);
|
||||
(error, corsHeaders, destinationBucket, objectSSEConfig) => {
|
||||
if (error) {
|
||||
return callback(error, null, corsHeaders);
|
||||
}
|
||||
return _storetheMPObject(destinationBucket, corsHeaders, objectSSEConfig);
|
||||
},
|
||||
);
|
||||
return undefined;
|
||||
}
|
||||
|
|
|
@@ -90,7 +90,7 @@ function listParts(authInfo, request, log, callback) {
    }
    const partNumberMarker =
        Number.parseInt(request.query['part-number-marker'], 10) ?
            Number.parseInt(request.query['part-number-marker'], 10) : 0;
    const metadataValMPUparams = {
        authInfo,
        bucketName,
@ -160,22 +160,22 @@ function listParts(authInfo, request, log, callback) {
|
|||
location = mpuOverviewObj.controllingLocationConstraint;
|
||||
}
|
||||
return multipleBackendGateway.listParts(objectKey, uploadId,
|
||||
location, bucketName, partNumberMarker, maxParts, log,
|
||||
(err, backendPartList) => {
|
||||
if (err) {
|
||||
return next(err, destBucket);
|
||||
} else if (backendPartList) {
|
||||
return next(null, destBucket, mpuBucket,
|
||||
mpuOverviewObj, backendPartList);
|
||||
}
|
||||
return next(null, destBucket, mpuBucket, mpuOverviewObj,
|
||||
null);
|
||||
});
|
||||
location, bucketName, partNumberMarker, maxParts, log,
|
||||
(err, backendPartList) => {
|
||||
if (err) {
|
||||
return next(err, destBucket);
|
||||
} else if (backendPartList) {
|
||||
return next(null, destBucket, mpuBucket,
|
||||
mpuOverviewObj, backendPartList);
|
||||
}
|
||||
return next(null, destBucket, mpuBucket, mpuOverviewObj,
|
||||
null);
|
||||
});
|
||||
}
|
||||
return next(null, destBucket, mpuBucket, mpuOverviewObj, null);
|
||||
},
|
||||
function waterfall4(destBucket, mpuBucket, mpuOverviewObj,
|
||||
backendPartList, next) {
|
||||
backendPartList, next) {
|
||||
// if parts were returned from cloud backend, they were not
|
||||
// stored in Scality S3 metadata, so this step can be skipped
|
||||
if (backendPartList) {
|
||||
|
@ -195,13 +195,13 @@ function listParts(authInfo, request, log, callback) {
|
|||
splitter,
|
||||
};
|
||||
return services.getSomeMPUparts(getPartsParams,
|
||||
(err, storedParts) => {
|
||||
if (err) {
|
||||
return next(err, destBucket, null);
|
||||
}
|
||||
return next(null, destBucket, mpuBucket, storedParts,
|
||||
mpuOverviewObj);
|
||||
});
|
||||
(err, storedParts) => {
|
||||
if (err) {
|
||||
return next(err, destBucket, null);
|
||||
}
|
||||
return next(null, destBucket, mpuBucket, storedParts,
|
||||
mpuOverviewObj);
|
||||
});
|
||||
}, function waterfall5(destBucket, mpuBucket, storedParts,
|
||||
mpuOverviewObj, next) {
|
||||
const encodingFn = encoding === 'url'
|
||||
|
@@ -245,7 +245,7 @@ function listParts(authInfo, request, log, callback) {
        xml.push(
            '<?xml version="1.0" encoding="UTF-8"?>',
            '<ListPartsResult xmlns="http://s3.amazonaws.com/doc/' +
-                '2006-03-01/">'
+                '2006-03-01/">',
        );
        buildXML([
            { tag: 'Bucket', value: bucketName },
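The only functional character in the hunk above is the trailing comma added after the last pushed string; trailing commas in function-call argument lists are legal from ES2017 onward, so future additions to the list touch a single line. A standalone sketch:

const xml = [];
xml.push(
    '<?xml version="1.0" encoding="UTF-8"?>',
    '<ListPartsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
);
console.log(xml.join(''));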
@@ -40,7 +40,7 @@ const versionIdUtils = versioning.VersionID;
 */

/*
   Format of xml response:
   <?xml version="1.0" encoding="UTF-8"?>
   <DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
@@ -72,19 +72,19 @@ function _formatXML(quietSetting, errorResults, deleted) {
    let errorXML = [];
    errorResults.forEach(errorObj => {
        errorXML.push(
            '<Error>',
            '<Key>', escapeForXml(errorObj.entry.key), '</Key>',
            '<Code>', escapeForXml(errorObj.error.message), '</Code>');
        if (errorObj.entry.versionId) {
            const version = errorObj.entry.versionId === 'null' ?
                'null' : escapeForXml(errorObj.entry.versionId);
            errorXML.push('<VersionId>', version, '</VersionId>');
        }
        errorXML.push(
            '<Message>',
            escapeForXml(errorObj.error.description),
            '</Message>',
-            '</Error>'
+            '</Error>',
        );
    });
    errorXML = errorXML.join('');
@@ -110,13 +110,13 @@ function _formatXML(quietSetting, errorResults, deleted) {
            '<Deleted>',
            '<Key>',
            escapeForXml(version.entry.key),
-            '</Key>'
+            '</Key>',
        );
        if (version.entry.versionId) {
            deletedXML.push(
                '<VersionId>',
                escapeForXml(version.entry.versionId),
-                '</VersionId>'
+                '</VersionId>',
            );
        }
        if (isDeleteMarker) {
@@ -126,7 +126,7 @@ function _formatXML(quietSetting, errorResults, deleted) {
                '</DeleteMarker>',
                '<DeleteMarkerVersionId>',
                deleteMarkerVersionId,
-                '</DeleteMarkerVersionId>'
+                '</DeleteMarkerVersionId>',
            );
        }
        deletedXML.push('</Deleted>');
@@ -183,7 +183,7 @@ function _parseXml(xmlToParse, next) {
 * successfullyDeleted, totalContentLengthDeleted)
 */
function getObjMetadataAndDelete(authInfo, canonicalID, request,
    bucketName, bucket, quietSetting, errorResults, inPlay, log, next) {
    const successfullyDeleted = [];
    let totalContentLengthDeleted = 0;
    let numOfObjectsRemoved = 0;
@@ -210,10 +210,10 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
        (versionId, callback) => metadataGetObject(bucketName, entry.key,
            versionId, log, (err, objMD) => {
                // if general error from metadata return error
-                if (err && !err.NoSuchKey) {
+                if (err && !err.is.NoSuchKey) {
                    return callback(err);
                }
-                if (err && err.NoSuchKey) {
+                if (err?.is.NoSuchKey) {
                    const verCfg = bucket.getVersioningConfiguration();
                    // To adhere to AWS behavior, create a delete marker
                    // if trying to delete an object that does not exist
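The branch above mirrors AWS behavior: a missing key is only an error when the bucket is unversioned; once versioning is configured, the delete still goes through and produces a delete marker, and only a request for an explicit, nonexistent version is rejected. A compact sketch of that decision, with names and return values chosen for illustration only:

function resolveMissingKey(versioningCfg, reqVersionId) {
    if (!versioningCfg) {
        return 'NoSuchKey';          // unversioned bucket: plain error
    }
    if (reqVersionId) {
        return 'NoSuchVersion';      // explicit version that never existed
    }
    return 'createDeleteMarker';     // versioned bucket: still add a marker
}

console.log(resolveMissingKey(null, null));                  // 'NoSuchKey'
console.log(resolveMissingKey({ Status: 'Enabled' }, null)); // 'createDeleteMarker'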
@ -237,22 +237,22 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
|
|||
}),
|
||||
(objMD, versionId, callback) =>
|
||||
preprocessingVersioningDelete(bucketName, bucket, objMD,
|
||||
versionId, log, (err, options) => callback(err, options,
|
||||
objMD)),
|
||||
versionId, log, (err, options) => callback(err, options,
|
||||
objMD)),
|
||||
(options, objMD, callback) => {
|
||||
const deleteInfo = {};
|
||||
if (options && options.deleteData) {
|
||||
deleteInfo.deleted = true;
|
||||
return services.deleteObject(bucketName, objMD,
|
||||
entry.key, options, log, err =>
|
||||
callback(err, objMD, deleteInfo));
|
||||
callback(err, objMD, deleteInfo));
|
||||
}
|
||||
deleteInfo.newDeleteMarker = true;
|
||||
// This call will create a delete-marker
|
||||
return createAndStoreObject(bucketName, bucket, entry.key,
|
||||
objMD, authInfo, canonicalID, null, request,
|
||||
deleteInfo.newDeleteMarker, null, log, (err, result) =>
|
||||
callback(err, objMD, deleteInfo, result.versionId));
|
||||
callback(err, objMD, deleteInfo, result.versionId));
|
||||
},
|
||||
], (err, objMD, deleteInfo, versionId) => {
|
||||
if (err === skipError) {
|
||||
|
@@ -386,7 +386,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
    return vault.checkPolicies(requestContextParams, authInfo.getArn(),
        log, (err, authorizationResults) => {
            // there were no policies so received a blanket AccessDenied
-            if (err && err.AccessDenied) {
+            if (err?.is.AccessDenied) {
                objects.forEach(entry => {
                    errorResults.push({
                        entry,
@ -482,8 +482,8 @@ function multiObjectDelete(authInfo, request, log, callback) {
|
|||
function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay,
|
||||
bucket, next) {
|
||||
return getObjMetadataAndDelete(authInfo, canonicalID, request,
|
||||
bucketName, bucket, quietSetting, errorResults, inPlay,
|
||||
log, next);
|
||||
bucketName, bucket, quietSetting, errorResults, inPlay,
|
||||
log, next);
|
||||
},
|
||||
], (err, quietSetting, errorResults, numOfObjectsRemoved,
|
||||
successfullyDeleted, totalContentLengthDeleted, bucket) => {
|
||||
|
|
|
@@ -1,5 +1,3 @@
-const { errors } = require('arsenal');
-
 const abortMultipartUpload = require('./apiUtils/object/abortMultipartUpload');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const isLegacyAWSBehavior = require('../utilities/legacyAWSBehavior');
@@ -29,10 +27,10 @@ function multipartDelete(authInfo, request, log, callback) {
            request.method, destinationBucket);
        const location = destinationBucket ?
            destinationBucket.getLocationConstraint() : null;
-        if (err && err !== errors.NoSuchUpload) {
+        if (err && !err.is.NoSuchUpload) {
            return callback(err, corsHeaders);
        }
-        if (err === errors.NoSuchUpload && isLegacyAWSBehavior(location)) {
+        if (err?.is.NoSuchUpload && isLegacyAWSBehavior(location)) {
            log.trace('did not find valid mpu with uploadId', {
                method: 'multipartDelete',
                uploadId,
@ -60,7 +60,7 @@ function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination,
|
|||
whichTagging = whichTagging === undefined ? 'COPY' : whichTagging;
|
||||
if (whichTagging !== 'COPY' && whichTagging !== 'REPLACE') {
|
||||
return { error: errors.InvalidArgument
|
||||
.customizeDescription('Unknown tagging directive') };
|
||||
.customizeDescription('Unknown tagging directive') };
|
||||
}
|
||||
const overrideMetadata = {};
|
||||
if (headers['x-amz-server-side-encryption']) {
|
||||
|
@ -185,7 +185,7 @@ function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination,
|
|||
storeMetadataParams.contentType = sourceObjMD['content-type'];
|
||||
}
|
||||
return { storeMetadataParams, sourceLocationConstraintName,
|
||||
backendInfoDest: backendInfoObjDest.backendInfo };
|
||||
backendInfoDest: backendInfoObjDest.backendInfo };
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -249,7 +249,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
|||
(err, destBucketMD, destObjMD) => {
|
||||
if (err) {
|
||||
log.debug('error validating put part of request',
|
||||
{ error: err });
|
||||
{ error: err });
|
||||
return next(err, destBucketMD);
|
||||
}
|
||||
const flag = destBucketMD.hasDeletedFlag()
|
||||
|
@ -267,7 +267,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
|||
(err, sourceBucketMD, sourceObjMD) => {
|
||||
if (err) {
|
||||
log.debug('error validating get part of request',
|
||||
{ error: err });
|
||||
{ error: err });
|
||||
return next(err, null, destBucketMD);
|
||||
}
|
||||
if (!sourceObjMD) {
|
||||
|
@ -278,10 +278,10 @@ function objectCopy(authInfo, request, sourceBucket,
|
|||
}
|
||||
if (sourceObjMD.isDeleteMarker) {
|
||||
log.debug('delete marker on source object',
|
||||
{ sourceObject });
|
||||
{ sourceObject });
|
||||
if (sourceVersionId) {
|
||||
const err = errors.InvalidRequest
|
||||
.customizeDescription('The source of a copy ' +
|
||||
.customizeDescription('The source of a copy ' +
|
||||
'request may not specifically refer to a delete' +
|
||||
'marker by version id.');
|
||||
return next(err, destBucketMD);
|
||||
|
@ -293,13 +293,13 @@ function objectCopy(authInfo, request, sourceBucket,
|
|||
}
|
||||
const headerValResult =
|
||||
validateHeaders(request.headers,
|
||||
sourceObjMD['last-modified'],
|
||||
sourceObjMD['content-md5']);
|
||||
sourceObjMD['last-modified'],
|
||||
sourceObjMD['content-md5']);
|
||||
if (headerValResult.error) {
|
||||
return next(errors.PreconditionFailed, destBucketMD);
|
||||
}
|
||||
const { storeMetadataParams, error: metadataError,
|
||||
sourceLocationConstraintName, backendInfoDest } =
|
||||
sourceLocationConstraintName, backendInfoDest } =
|
||||
_prepMetadata(request, sourceObjMD, request.headers,
|
||||
sourceIsDestination, authInfo, destObjectKey,
|
||||
sourceBucketMD, destBucketMD, sourceVersionId, log);
|
||||
|
@ -319,7 +319,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
|||
// md-model-version 2, need to handle cases where
|
||||
// objMD.location is just a string
|
||||
dataLocator = Array.isArray(sourceObjMD.location) ?
|
||||
sourceObjMD.location : [{ key: sourceObjMD.location }];
|
||||
sourceObjMD.location : [{ key: sourceObjMD.location }];
|
||||
}
|
||||
|
||||
if (sourceObjMD['x-amz-server-side-encryption']) {
|
||||
|
@ -339,13 +339,13 @@ function objectCopy(authInfo, request, sourceBucket,
|
|||
destBucketMD, destObjMD, sourceLocationConstraintName,
|
||||
backendInfoDest, next) {
|
||||
getObjectSSEConfiguration(
|
||||
request.headers,
|
||||
destBucketMD,
|
||||
log,
|
||||
(err, sseConfig) =>
|
||||
next(err, storeMetadataParams, dataLocator, sourceBucketMD,
|
||||
destBucketMD, destObjMD, sourceLocationConstraintName,
|
||||
backendInfoDest, sseConfig));
|
||||
request.headers,
|
||||
destBucketMD,
|
||||
log,
|
||||
(err, sseConfig) =>
|
||||
next(err, storeMetadataParams, dataLocator, sourceBucketMD,
|
||||
destBucketMD, destObjMD, sourceLocationConstraintName,
|
||||
backendInfoDest, sseConfig));
|
||||
},
|
||||
function goGetData(storeMetadataParams, dataLocator, sourceBucketMD,
|
||||
destBucketMD, destObjMD, sourceLocationConstraintName,
|
||||
|
@ -380,7 +380,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
|||
{ method: 'multipleBackendGateway',
|
||||
error: errors.NotImplemented });
|
||||
return next(errors.NotImplemented.customizeDescription(
|
||||
externalVersioningErrorMessage), destBucketMD);
|
||||
externalVersioningErrorMessage), destBucketMD);
|
||||
}
|
||||
if (dataLocator.length === 0) {
|
||||
if (!storeMetadataParams.locationMatch &&
|
||||
|
@ -409,15 +409,15 @@ function objectCopy(authInfo, request, sourceBucket,
|
|||
serverSideEncryption, destBucketMD);
|
||||
}
|
||||
return data.copyObject(request, sourceLocationConstraintName,
|
||||
storeMetadataParams, dataLocator, dataStoreContext,
|
||||
backendInfoDest, sourceBucketMD, destBucketMD, serverSideEncryption, log,
|
||||
(err, results) => {
|
||||
if (err) {
|
||||
return next(err, destBucketMD);
|
||||
}
|
||||
return next(null, storeMetadataParams, results,
|
||||
destObjMD, serverSideEncryption, destBucketMD);
|
||||
});
|
||||
storeMetadataParams, dataLocator, dataStoreContext,
|
||||
backendInfoDest, sourceBucketMD, destBucketMD, serverSideEncryption, log,
|
||||
(err, results) => {
|
||||
if (err) {
|
||||
return next(err, destBucketMD);
|
||||
}
|
||||
return next(null, storeMetadataParams, results,
|
||||
destObjMD, serverSideEncryption, destBucketMD);
|
||||
});
|
||||
},
|
||||
function getVersioningInfo(storeMetadataParams, destDataGetInfoArr,
|
||||
destObjMD, serverSideEncryption, destBucketMD, next) {
|
||||
|
@ -426,7 +426,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
|||
(err, options) => {
|
||||
if (err) {
|
||||
log.debug('error processing versioning info',
|
||||
{ error: err });
|
||||
{ error: err });
|
||||
return next(err, null, destBucketMD);
|
||||
}
|
||||
// eslint-disable-next-line
|
||||
|
@ -494,8 +494,8 @@ function objectCopy(authInfo, request, sourceBucket,
|
|||
{ error: err });
|
||||
}
|
||||
next(null,
|
||||
storingNewMdResult, destBucketMD, storeMetadataParams,
|
||||
serverSideEncryption, sourceObjSize, destObjPrevSize);
|
||||
storingNewMdResult, destBucketMD, storeMetadataParams,
|
||||
serverSideEncryption, sourceObjSize, destObjPrevSize);
|
||||
});
|
||||
}
|
||||
return next(null,
|
||||
|
@ -524,20 +524,20 @@ function objectCopy(authInfo, request, sourceBucket,
|
|||
serverSideEncryption.algorithm;
|
||||
if (serverSideEncryption.algorithm === 'aws:kms') {
|
||||
additionalHeaders[
|
||||
'x-amz-server-side-encryption-aws-kms-key-id'] =
|
||||
'x-amz-server-side-encryption-aws-kms-key-id'] =
|
||||
serverSideEncryption.masterKeyId;
|
||||
}
|
||||
}
|
||||
if (sourceVersionId) {
|
||||
additionalHeaders['x-amz-copy-source-version-id'] =
|
||||
versionIdUtils.encode(sourceVersionId,
|
||||
config.versionIdEncodingType);
|
||||
config.versionIdEncodingType);
|
||||
}
|
||||
const isVersioned = storingNewMdResult && storingNewMdResult.versionId;
|
||||
if (isVersioned) {
|
||||
additionalHeaders['x-amz-version-id'] =
|
||||
versionIdUtils.encode(storingNewMdResult.versionId,
|
||||
config.versionIdEncodingType);
|
||||
config.versionIdEncodingType);
|
||||
}
|
||||
|
||||
Object.assign(responseHeaders, additionalHeaders);
|
||||
|
|
|
@ -55,49 +55,49 @@ function objectDelete(authInfo, request, log, cb) {
|
|||
return async.waterfall([
|
||||
function validateBucketAndObj(next) {
|
||||
return metadataValidateBucketAndObj(valParams, log,
|
||||
(err, bucketMD, objMD) => {
|
||||
if (err) {
|
||||
return next(err, bucketMD);
|
||||
}
|
||||
(err, bucketMD, objMD) => {
|
||||
if (err) {
|
||||
return next(err, bucketMD);
|
||||
}
|
||||
|
||||
const versioningCfg = bucketMD.getVersioningConfiguration();
|
||||
if (!objMD) {
|
||||
if (!versioningCfg) {
|
||||
return next(errors.NoSuchKey, bucketMD);
|
||||
}
|
||||
// AWS does not return an error when trying to delete a
|
||||
// specific version that does not exist. We skip to the end
|
||||
// of the waterfall here.
|
||||
if (reqVersionId) {
|
||||
log.debug('trying to delete specific version ' +
|
||||
const versioningCfg = bucketMD.getVersioningConfiguration();
|
||||
if (!objMD) {
|
||||
if (!versioningCfg) {
|
||||
return next(errors.NoSuchKey, bucketMD);
|
||||
}
|
||||
// AWS does not return an error when trying to delete a
|
||||
// specific version that does not exist. We skip to the end
|
||||
// of the waterfall here.
|
||||
if (reqVersionId) {
|
||||
log.debug('trying to delete specific version ' +
|
||||
' that does not exist');
|
||||
return next(errors.NoSuchVersion, bucketMD);
|
||||
return next(errors.NoSuchVersion, bucketMD);
|
||||
}
|
||||
// To adhere to AWS behavior, create a delete marker even
|
||||
// if trying to delete an object that does not exist when
|
||||
// versioning has been configured
|
||||
return next(null, bucketMD, objMD);
|
||||
}
|
||||
// To adhere to AWS behavior, create a delete marker even
|
||||
// if trying to delete an object that does not exist when
|
||||
// versioning has been configured
|
||||
return next(null, bucketMD, objMD);
|
||||
}
|
||||
// AWS only returns an object lock error if a version id
|
||||
// is specified, else continue to create a delete marker
|
||||
if (reqVersionId &&
|
||||
// AWS only returns an object lock error if a version id
|
||||
// is specified, else continue to create a delete marker
|
||||
if (reqVersionId &&
|
||||
isObjectLocked(bucketMD, objMD, request.headers)) {
|
||||
log.debug('trying to delete locked object');
|
||||
return next(objectLockedError, bucketMD);
|
||||
}
|
||||
if (reqVersionId && objMD.location &&
|
||||
log.debug('trying to delete locked object');
|
||||
return next(objectLockedError, bucketMD);
|
||||
}
|
||||
if (reqVersionId && objMD.location &&
|
||||
Array.isArray(objMD.location) && objMD.location[0]) {
|
||||
// we need this information for data deletes to AWS
|
||||
// eslint-disable-next-line no-param-reassign
|
||||
objMD.location[0].deleteVersion = true;
|
||||
}
|
||||
if (objMD['content-length'] !== undefined) {
|
||||
log.end().addDefaultFields({
|
||||
bytesDeleted: objMD['content-length'],
|
||||
});
|
||||
}
|
||||
return next(null, bucketMD, objMD);
|
||||
});
|
||||
objMD.location[0].deleteVersion = true;
|
||||
}
|
||||
if (objMD['content-length'] !== undefined) {
|
||||
log.end().addDefaultFields({
|
||||
bytesDeleted: objMD['content-length'],
|
||||
});
|
||||
}
|
||||
return next(null, bucketMD, objMD);
|
||||
});
|
||||
},
|
||||
function getVersioningInfo(bucketMD, objectMD, next) {
|
||||
return preprocessingVersioningDelete(bucketName,
|
||||
|
@ -105,7 +105,7 @@ function objectDelete(authInfo, request, log, cb) {
|
|||
(err, options) => {
|
||||
if (err) {
|
||||
log.error('err processing versioning info',
|
||||
{ error: err });
|
||||
{ error: err });
|
||||
return next(err, bucketMD);
|
||||
}
|
||||
return next(null, bucketMD, objectMD, options);
|
||||
|
@ -124,14 +124,14 @@ function objectDelete(authInfo, request, log, cb) {
|
|||
}
|
||||
return services.deleteObject(bucketName, objectMD, objectKey,
|
||||
delOptions, log, (err, delResult) => next(err, bucketMD,
|
||||
objectMD, delResult, deleteInfo));
|
||||
objectMD, delResult, deleteInfo));
|
||||
}
|
||||
// putting a new delete marker
|
||||
deleteInfo.newDeleteMarker = true;
|
||||
return createAndStoreObject(bucketName, bucketMD,
|
||||
objectKey, objectMD, authInfo, canonicalID, null, request,
|
||||
deleteInfo.newDeleteMarker, null, log, (err, newDelMarkerRes) =>
|
||||
next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo));
|
||||
next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo));
|
||||
},
|
||||
], (err, bucketMD, objectMD, result, deleteInfo) => {
|
||||
const resHeaders = collectCorsHeaders(request.headers.origin,
|
||||
|
|
|
@ -48,26 +48,26 @@ function objectDeleteTagging(authInfo, request, log, callback) {
|
|||
|
||||
return async.waterfall([
|
||||
next => metadataValidateBucketAndObj(metadataValParams, log,
|
||||
(err, bucket, objectMD) => {
|
||||
if (err) {
|
||||
log.trace('request authorization failed',
|
||||
{ method: 'objectDeleteTagging', error: err });
|
||||
return next(err);
|
||||
}
|
||||
if (!objectMD) {
|
||||
const err = reqVersionId ? errors.NoSuchVersion :
|
||||
errors.NoSuchKey;
|
||||
log.trace('error no object metadata found',
|
||||
{ method: 'objectDeleteTagging', error: err });
|
||||
return next(err, bucket);
|
||||
}
|
||||
if (objectMD.isDeleteMarker) {
|
||||
log.trace('version is a delete marker',
|
||||
{ method: 'objectDeleteTagging' });
|
||||
return next(errors.MethodNotAllowed, bucket);
|
||||
}
|
||||
return next(null, bucket, objectMD);
|
||||
}),
|
||||
(err, bucket, objectMD) => {
|
||||
if (err) {
|
||||
log.trace('request authorization failed',
|
||||
{ method: 'objectDeleteTagging', error: err });
|
||||
return next(err);
|
||||
}
|
||||
if (!objectMD) {
|
||||
const err = reqVersionId ? errors.NoSuchVersion :
|
||||
errors.NoSuchKey;
|
||||
log.trace('error no object metadata found',
|
||||
{ method: 'objectDeleteTagging', error: err });
|
||||
return next(err, bucket);
|
||||
}
|
||||
if (objectMD.isDeleteMarker) {
|
||||
log.trace('version is a delete marker',
|
||||
{ method: 'objectDeleteTagging' });
|
||||
return next(errors.MethodNotAllowed, bucket);
|
||||
}
|
||||
return next(null, bucket, objectMD);
|
||||
}),
|
||||
(bucket, objectMD, next) => {
|
||||
// eslint-disable-next-line no-param-reassign
|
||||
objectMD.tags = {};
|
||||
|
@ -81,13 +81,13 @@ function objectDeleteTagging(authInfo, request, log, callback) {
|
|||
objectMD.replicationInfo, replicationInfo);
|
||||
}
|
||||
metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params,
|
||||
log, err =>
|
||||
next(err, bucket, objectMD));
|
||||
log, err =>
|
||||
next(err, bucket, objectMD));
|
||||
},
|
||||
(bucket, objectMD, next) => {
|
||||
if (config.backends.data === 'multiple') {
|
||||
return multipleBackendGateway.objectTagging('Delete', objectKey,
|
||||
bucket, objectMD, log, err => next(err, bucket, objectMD));
|
||||
bucket, objectMD, log, err => next(err, bucket, objectMD));
|
||||
}
|
||||
return next(null, bucket, objectMD);
|
||||
},
|
||||
|
|
|
@@ -1,5 +1,5 @@
 const { errors, s3middleware } = require('arsenal');
-const { parseRange } = require('arsenal/lib/network/http/utils');
+const { parseRange } = require('arsenal').network.http.utils;

 const data = require('../data/wrapper');
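The import change above moves off arsenal's internal lib/ path and onto its exported namespace. A minimal usage sketch; the header value and object length are illustrative, and the destructured { range, error } shape follows the call site further down in the handler:

const { parseRange } = require('arsenal').network.http.utils;

const objLength = 1000;
const { range, error } = parseRange('bytes=0-499', objLength);
if (!error) {
    console.log(range); // expected inclusive pair, e.g. [0, 499]
}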
@ -49,187 +49,187 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
|
|||
};
|
||||
|
||||
return metadataValidateBucketAndObj(mdValParams, log,
|
||||
(err, bucket, objMD) => {
|
||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||
request.method, bucket);
|
||||
if (err) {
|
||||
log.debug('error processing request', {
|
||||
error: err,
|
||||
method: 'metadataValidateBucketAndObj',
|
||||
});
|
||||
return callback(err, null, corsHeaders);
|
||||
}
|
||||
if (!objMD) {
|
||||
const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
|
||||
return callback(err, null, corsHeaders);
|
||||
}
|
||||
const verCfg = bucket.getVersioningConfiguration();
|
||||
if (objMD.isDeleteMarker) {
|
||||
const responseMetaHeaders = Object.assign({},
|
||||
{ 'x-amz-delete-marker': true }, corsHeaders);
|
||||
if (!versionId) {
|
||||
return callback(errors.NoSuchKey, null, responseMetaHeaders);
|
||||
(err, bucket, objMD) => {
|
||||
const corsHeaders = collectCorsHeaders(request.headers.origin,
|
||||
request.method, bucket);
|
||||
if (err) {
|
||||
log.debug('error processing request', {
|
||||
error: err,
|
||||
method: 'metadataValidateBucketAndObj',
|
||||
});
|
||||
return callback(err, null, corsHeaders);
|
||||
}
|
||||
// return MethodNotAllowed if requesting a specific
|
||||
// version that has a delete marker
|
||||
responseMetaHeaders['x-amz-version-id'] =
|
||||
if (!objMD) {
|
||||
const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
|
||||
return callback(err, null, corsHeaders);
|
||||
}
|
||||
const verCfg = bucket.getVersioningConfiguration();
|
||||
if (objMD.isDeleteMarker) {
|
||||
const responseMetaHeaders = Object.assign({},
|
||||
{ 'x-amz-delete-marker': true }, corsHeaders);
|
||||
if (!versionId) {
|
||||
return callback(errors.NoSuchKey, null, responseMetaHeaders);
|
||||
}
|
||||
// return MethodNotAllowed if requesting a specific
|
||||
// version that has a delete marker
|
||||
responseMetaHeaders['x-amz-version-id'] =
|
||||
getVersionIdResHeader(verCfg, objMD);
|
||||
return callback(errors.MethodNotAllowed, null,
|
||||
responseMetaHeaders);
|
||||
}
|
||||
const headerValResult = validateHeaders(request.headers,
|
||||
objMD['last-modified'], objMD['content-md5']);
|
||||
if (headerValResult.error) {
|
||||
return callback(headerValResult.error, null, corsHeaders);
|
||||
}
|
||||
const responseMetaHeaders = collectResponseHeaders(objMD,
|
||||
corsHeaders, verCfg, returnTagCount);
|
||||
|
||||
setExpirationHeaders(responseMetaHeaders, {
|
||||
lifecycleConfig: bucket.getLifecycleConfiguration(),
|
||||
objectParams: {
|
||||
key: objectKey,
|
||||
tags: objMD.tags,
|
||||
date: objMD['last-modified'],
|
||||
},
|
||||
isVersionedReq: !!versionId,
|
||||
});
|
||||
|
||||
const objLength = (objMD.location === null ?
|
||||
0 : parseInt(objMD['content-length'], 10));
|
||||
let byteRange;
|
||||
const streamingParams = {};
|
||||
if (request.headers.range) {
|
||||
const { range, error } = parseRange(request.headers.range,
|
||||
objLength);
|
||||
if (error) {
|
||||
return callback(error, null, corsHeaders);
|
||||
return callback(errors.MethodNotAllowed, null,
|
||||
responseMetaHeaders);
|
||||
}
|
||||
responseMetaHeaders['Accept-Ranges'] = 'bytes';
|
||||
if (range) {
|
||||
byteRange = range;
|
||||
// End of range should be included so + 1
|
||||
responseMetaHeaders['Content-Length'] =
|
||||
const headerValResult = validateHeaders(request.headers,
|
||||
objMD['last-modified'], objMD['content-md5']);
|
||||
if (headerValResult.error) {
|
||||
return callback(headerValResult.error, null, corsHeaders);
|
||||
}
|
||||
const responseMetaHeaders = collectResponseHeaders(objMD,
|
||||
corsHeaders, verCfg, returnTagCount);
|
||||
|
||||
setExpirationHeaders(responseMetaHeaders, {
|
||||
lifecycleConfig: bucket.getLifecycleConfiguration(),
|
||||
objectParams: {
|
||||
key: objectKey,
|
||||
tags: objMD.tags,
|
||||
date: objMD['last-modified'],
|
||||
},
|
||||
isVersionedReq: !!versionId,
|
||||
});
|
||||
|
||||
const objLength = (objMD.location === null ?
|
||||
0 : parseInt(objMD['content-length'], 10));
|
||||
let byteRange;
|
||||
const streamingParams = {};
|
||||
if (request.headers.range) {
|
||||
const { range, error } = parseRange(request.headers.range,
|
||||
objLength);
|
||||
if (error) {
|
||||
return callback(error, null, corsHeaders);
|
||||
}
|
||||
responseMetaHeaders['Accept-Ranges'] = 'bytes';
|
||||
if (range) {
|
||||
byteRange = range;
|
||||
// End of range should be included so + 1
|
||||
responseMetaHeaders['Content-Length'] =
|
||||
range[1] - range[0] + 1;
|
||||
responseMetaHeaders['Content-Range'] =
|
||||
responseMetaHeaders['Content-Range'] =
|
||||
`bytes ${range[0]}-${range[1]}/${objLength}`;
|
||||
streamingParams.rangeStart = range[0] ?
|
||||
range[0].toString() : undefined;
|
||||
streamingParams.rangeEnd = range[1] ?
|
||||
range[1].toString() : undefined;
|
||||
streamingParams.rangeStart = range[0] ?
|
||||
range[0].toString() : undefined;
|
||||
streamingParams.rangeEnd = range[1] ?
|
||||
range[1].toString() : undefined;
|
||||
}
|
||||
}
|
||||
}
|
||||
let dataLocator = null;
|
||||
if (objMD.location !== null) {
|
||||
let dataLocator = null;
|
||||
if (objMD.location !== null) {
|
||||
// To provide for backwards compatibility before
|
||||
// md-model-version 2, need to handle cases where
|
||||
// objMD.location is just a string
|
||||
dataLocator = Array.isArray(objMD.location) ?
|
||||
objMD.location : [{ key: objMD.location }];
|
||||
// if the data backend is azure, there will only ever be at
|
||||
// most one item in the dataLocator array
|
||||
if (dataLocator[0] && dataLocator[0].dataStoreType === 'azure') {
|
||||
dataLocator[0].azureStreamingOptions = streamingParams;
|
||||
}
|
||||
dataLocator = Array.isArray(objMD.location) ?
|
||||
objMD.location : [{ key: objMD.location }];
|
||||
// if the data backend is azure, there will only ever be at
|
||||
// most one item in the dataLocator array
|
||||
if (dataLocator[0] && dataLocator[0].dataStoreType === 'azure') {
|
||||
dataLocator[0].azureStreamingOptions = streamingParams;
|
||||
}
|
||||
|
||||
let partNumber = null;
|
||||
if (request.query && request.query.partNumber !== undefined) {
|
||||
if (byteRange) {
|
||||
const error = errors.InvalidRequest
|
||||
.customizeDescription('Cannot specify both Range ' +
|
||||
let partNumber = null;
|
||||
if (request.query && request.query.partNumber !== undefined) {
|
||||
if (byteRange) {
|
||||
const error = errors.InvalidRequest
|
||||
.customizeDescription('Cannot specify both Range ' +
|
||||
'header and partNumber query parameter.');
|
||||
return callback(error, null, corsHeaders);
|
||||
}
|
||||
partNumber = Number.parseInt(request.query.partNumber, 10);
|
||||
if (Number.isNaN(partNumber)) {
|
||||
const error = errors.InvalidArgument
|
||||
.customizeDescription('Part number must be a number.');
|
||||
return callback(error, null, corsHeaders);
|
||||
}
|
||||
if (partNumber < 1 || partNumber > 10000) {
|
||||
const error = errors.InvalidArgument
|
||||
.customizeDescription('Part number must be an ' +
|
||||
return callback(error, null, corsHeaders);
|
||||
}
|
||||
partNumber = Number.parseInt(request.query.partNumber, 10);
|
||||
if (Number.isNaN(partNumber)) {
|
||||
const error = errors.InvalidArgument
|
||||
.customizeDescription('Part number must be a number.');
|
||||
return callback(error, null, corsHeaders);
|
||||
}
|
||||
if (partNumber < 1 || partNumber > 10000) {
|
||||
const error = errors.InvalidArgument
|
||||
.customizeDescription('Part number must be an ' +
|
||||
'integer between 1 and 10000, inclusive.');
|
||||
return callback(error, null, corsHeaders);
|
||||
return callback(error, null, corsHeaders);
|
||||
}
|
||||
}
|
||||
}
|
||||
// If have a data model before version 2, cannot support
|
||||
// get range for objects with multiple parts
|
||||
if (byteRange && dataLocator.length > 1 &&
|
||||
// If have a data model before version 2, cannot support
|
||||
// get range for objects with multiple parts
|
||||
if (byteRange && dataLocator.length > 1 &&
|
||||
dataLocator[0].start === undefined) {
|
||||
return callback(errors.NotImplemented, null, corsHeaders);
|
||||
}
|
||||
if (objMD['x-amz-server-side-encryption']) {
|
||||
for (let i = 0; i < dataLocator.length; i++) {
|
||||
dataLocator[i].masterKeyId =
|
||||
objMD['x-amz-server-side-encryption-aws-kms-key-id'];
|
||||
dataLocator[i].algorithm =
|
||||
objMD['x-amz-server-side-encryption'];
|
||||
return callback(errors.NotImplemented, null, corsHeaders);
|
||||
}
|
||||
}
|
||||
if (partNumber) {
|
||||
const locations = [];
|
||||
let locationPartNumber;
|
||||
for (let i = 0; i < objMD.location.length; i++) {
|
||||
const { dataStoreETag } = objMD.location[i];
|
||||
if (objMD['x-amz-server-side-encryption']) {
|
||||
for (let i = 0; i < dataLocator.length; i++) {
|
||||
dataLocator[i].masterKeyId =
|
||||
objMD['x-amz-server-side-encryption-aws-kms-key-id'];
|
||||
dataLocator[i].algorithm =
|
||||
objMD['x-amz-server-side-encryption'];
|
||||
}
|
||||
}
|
||||
if (partNumber) {
|
||||
const locations = [];
|
||||
let locationPartNumber;
|
||||
for (let i = 0; i < objMD.location.length; i++) {
|
||||
const { dataStoreETag } = objMD.location[i];
|
||||
|
||||
if (dataStoreETag) {
|
||||
locationPartNumber =
|
||||
if (dataStoreETag) {
|
||||
locationPartNumber =
|
||||
Number.parseInt(dataStoreETag.split(':')[0], 10);
|
||||
} else {
|
||||
} else {
|
||||
/**
|
||||
* Location objects prior to GA7.1 do not include the
|
||||
* dataStoreETag field so we cannot find the part range,
|
||||
* the objects are treated as if they only have 1 part
|
||||
*/
|
||||
locationPartNumber = 1;
|
||||
}
|
||||
locationPartNumber = 1;
|
||||
}
|
||||
|
||||
// Get all parts that belong to the requested part number
|
||||
if (partNumber === locationPartNumber) {
|
||||
locations.push(objMD.location[i]);
|
||||
} else if (locationPartNumber > partNumber) {
|
||||
break;
|
||||
// Get all parts that belong to the requested part number
|
||||
if (partNumber === locationPartNumber) {
|
||||
locations.push(objMD.location[i]);
|
||||
} else if (locationPartNumber > partNumber) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (locations.length === 0) {
|
||||
return callback(errors.InvalidPartNumber, null,
|
||||
corsHeaders);
|
||||
}
|
||||
const { start } = locations[0];
|
||||
const endLocation = locations[locations.length - 1];
|
||||
const end = endLocation.start + endLocation.size - 1;
|
||||
responseMetaHeaders['Content-Length'] = end - start + 1;
|
||||
const partByteRange = [start, end];
|
||||
dataLocator = setPartRanges(dataLocator, partByteRange);
|
||||
const partsCount = getPartCountFromMd5(objMD);
|
||||
if (partsCount) {
|
||||
responseMetaHeaders['x-amz-mp-parts-count'] =
|
||||
if (locations.length === 0) {
|
||||
return callback(errors.InvalidPartNumber, null,
|
||||
corsHeaders);
|
||||
}
|
||||
const { start } = locations[0];
|
||||
const endLocation = locations[locations.length - 1];
|
||||
const end = endLocation.start + endLocation.size - 1;
|
||||
responseMetaHeaders['Content-Length'] = end - start + 1;
|
||||
const partByteRange = [start, end];
|
||||
dataLocator = setPartRanges(dataLocator, partByteRange);
|
||||
const partsCount = getPartCountFromMd5(objMD);
|
||||
if (partsCount) {
|
||||
responseMetaHeaders['x-amz-mp-parts-count'] =
|
||||
partsCount;
|
||||
}
|
||||
} else {
|
||||
dataLocator = setPartRanges(dataLocator, byteRange);
|
||||
}
|
||||
} else {
|
||||
dataLocator = setPartRanges(dataLocator, byteRange);
|
||||
}
|
||||
}
|
||||
return data.head(dataLocator, log, err => {
|
||||
if (err) {
|
||||
log.error('error from external backend checking for ' +
|
||||
return data.head(dataLocator, log, err => {
|
||||
if (err) {
|
||||
log.error('error from external backend checking for ' +
|
||||
'object existence', { error: err });
|
||||
return callback(err);
|
||||
}
|
||||
pushMetric('getObject', log, {
|
||||
authInfo,
|
||||
bucket: bucketName,
|
||||
keys: [objectKey],
|
||||
newByteLength:
|
||||
return callback(err);
|
||||
}
|
||||
pushMetric('getObject', log, {
|
||||
authInfo,
|
||||
bucket: bucketName,
|
||||
keys: [objectKey],
|
||||
newByteLength:
|
||||
Number.parseInt(responseMetaHeaders['Content-Length'], 10),
|
||||
versionId: objMD.versionId,
|
||||
location: objMD.dataStoreName,
|
||||
versionId: objMD.versionId,
|
||||
location: objMD.dataStoreName,
|
||||
});
|
||||
return callback(null, dataLocator, responseMetaHeaders,
|
||||
byteRange);
|
||||
});
|
||||
return callback(null, dataLocator, responseMetaHeaders,
|
||||
byteRange);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
module.exports = objectGet;
|
||||
|
|
|
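The objectGet hunk above keeps the usual inclusive-range bookkeeping: for a parsed range [start, end], Content-Length is end - start + 1 and Content-Range takes the form "bytes start-end/objectLength". A tiny sketch with illustrative numbers:

const objLength = 1000;
const range = [0, 499];                                    // first 500 bytes
const contentLength = range[1] - range[0] + 1;             // 500
const contentRange = `bytes ${range[0]}-${range[1]}/${objLength}`;
console.log(contentLength, contentRange);                  // 500 'bytes 0-499/1000'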
@ -47,7 +47,7 @@ function objectGetLegalHold(authInfo, request, log, callback) {
|
|||
(err, bucket, objectMD) => {
|
||||
if (err) {
|
||||
log.trace('request authorization failed',
|
||||
{ method: 'objectGetLegalHold', error: err });
|
||||
{ method: 'objectGetLegalHold', error: err });
|
||||
return next(err);
|
||||
}
|
||||
if (!objectMD) {
|
||||
|
|
|
@ -44,31 +44,31 @@ function objectGetTagging(authInfo, request, log, callback) {
|
|||
|
||||
return async.waterfall([
|
||||
next => metadataValidateBucketAndObj(metadataValParams, log,
|
||||
(err, bucket, objectMD) => {
|
||||
if (err) {
|
||||
log.trace('request authorization failed',
|
||||
{ method: 'objectGetTagging', error: err });
|
||||
return next(err);
|
||||
}
|
||||
if (!objectMD) {
|
||||
const err = reqVersionId ? errors.NoSuchVersion :
|
||||
errors.NoSuchKey;
|
||||
log.trace('error no object metadata found',
|
||||
{ method: 'objectGetTagging', error: err });
|
||||
return next(err, bucket);
|
||||
}
|
||||
if (objectMD.isDeleteMarker) {
|
||||
if (reqVersionId) {
|
||||
log.trace('requested version is delete marker',
|
||||
{ method: 'objectGetTagging' });
|
||||
return next(errors.MethodNotAllowed);
|
||||
}
|
||||
log.trace('most recent version is delete marker',
|
||||
{ method: 'objectGetTagging' });
|
||||
return next(errors.NoSuchKey);
|
||||
}
|
||||
return next(null, bucket, objectMD);
|
||||
}),
|
||||
(err, bucket, objectMD) => {
|
||||
if (err) {
|
||||
log.trace('request authorization failed',
|
||||
{ method: 'objectGetTagging', error: err });
|
||||
return next(err);
|
||||
}
|
||||
if (!objectMD) {
|
||||
const err = reqVersionId ? errors.NoSuchVersion :
|
||||
errors.NoSuchKey;
|
||||
log.trace('error no object metadata found',
|
||||
{ method: 'objectGetTagging', error: err });
|
||||
return next(err, bucket);
|
||||
}
|
||||
if (objectMD.isDeleteMarker) {
|
||||
if (reqVersionId) {
|
||||
log.trace('requested version is delete marker',
|
||||
{ method: 'objectGetTagging' });
|
||||
return next(errors.MethodNotAllowed);
|
||||
}
|
||||
log.trace('most recent version is delete marker',
|
||||
{ method: 'objectGetTagging' });
|
||||
return next(errors.NoSuchKey);
|
||||
}
|
||||
return next(null, bucket, objectMD);
|
||||
}),
|
||||
(bucket, objectMD, next) => {
|
||||
const tags = objectMD.tags;
|
||||
const xml = convertToXml(tags);
|
||||
|
|
|
@@ -1,6 +1,6 @@
 const { errors, s3middleware } = require('arsenal');
 const validateHeaders = s3middleware.validateConditionalHeaders;
-const { parseRange } = require('arsenal/lib/network/http/utils');
+const { parseRange } = require('arsenal').network.http.utils;

 const { decodeVersionId } = require('./apiUtils/object/versioning');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@ -70,115 +70,115 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
|
|||
log.trace('owner canonicalID to send to data', { canonicalID });
|
||||
|
||||
return metadataValidateBucketAndObj(valParams, log,
|
||||
(err, bucket, objMD) => {
|
||||
const responseHeaders = collectCorsHeaders(headers.origin,
|
||||
method, bucket);
|
||||
if (err) {
|
||||
log.trace('error processing request', {
|
||||
error: err,
|
||||
method: 'metadataValidateBucketAndObj',
|
||||
});
|
||||
return callback(err, responseHeaders);
|
||||
}
|
||||
if (bucket.hasDeletedFlag() && canonicalID !== bucket.getOwner()) {
|
||||
log.trace('deleted flag on bucket and request ' +
|
||||
'from non-owner account');
|
||||
return callback(errors.NoSuchBucket);
|
||||
}
|
||||
|
||||
return async.waterfall([
|
||||
function handleTransientOrDeleteBuckets(next) {
|
||||
if (bucket.hasTransientFlag() || bucket.hasDeletedFlag()) {
|
||||
return cleanUpBucket(bucket, canonicalID, log, next);
|
||||
}
|
||||
return next();
|
||||
},
|
||||
function getSSEConfig(next) {
|
||||
return getObjectSSEConfiguration(headers, bucket, log,
|
||||
(err, sseConfig) => {
|
||||
if (err) {
|
||||
log.error('error getting server side encryption config', { err });
|
||||
return next(invalidSSEError);
|
||||
}
|
||||
return next(null, sseConfig);
|
||||
}
|
||||
);
|
||||
},
|
||||
function createCipherBundle(serverSideEncryptionConfig, next) {
|
||||
if (serverSideEncryptionConfig) {
|
||||
return kms.createCipherBundle(
|
||||
serverSideEncryptionConfig, log, next);
|
||||
}
|
||||
return next(null, null);
|
||||
},
|
||||
function objectCreateAndStore(cipherBundle, next) {
|
||||
const objectLockValidationError
|
||||
= validateHeaders(bucket, headers, log);
|
||||
if (objectLockValidationError) {
|
||||
return next(objectLockValidationError);
|
||||
}
|
||||
writeContinue(request, request._response);
|
||||
return createAndStoreObject(bucketName,
|
||||
bucket, objectKey, objMD, authInfo, canonicalID, cipherBundle,
|
||||
request, false, streamingV4Params, log, next);
|
||||
},
|
||||
], (err, storingResult) => {
|
||||
(err, bucket, objMD) => {
|
||||
const responseHeaders = collectCorsHeaders(headers.origin,
|
||||
method, bucket);
|
||||
if (err) {
|
||||
log.trace('error processing request', {
|
||||
error: err,
|
||||
method: 'metadataValidateBucketAndObj',
|
||||
});
|
||||
return callback(err, responseHeaders);
|
||||
}
|
||||
const newByteLength = parsedContentLength;
|
||||
if (bucket.hasDeletedFlag() && canonicalID !== bucket.getOwner()) {
|
||||
log.trace('deleted flag on bucket and request ' +
|
||||
'from non-owner account');
|
||||
return callback(errors.NoSuchBucket);
|
    return async.waterfall([
        function handleTransientOrDeleteBuckets(next) {
            if (bucket.hasTransientFlag() || bucket.hasDeletedFlag()) {
                return cleanUpBucket(bucket, canonicalID, log, next);
            }
            return next();
        },
        function getSSEConfig(next) {
            return getObjectSSEConfiguration(headers, bucket, log,
                (err, sseConfig) => {
                    if (err) {
                        log.error('error getting server side encryption config', { err });
                        return next(invalidSSEError);
                    }
                    return next(null, sseConfig);
                },
            );
        },
        function createCipherBundle(serverSideEncryptionConfig, next) {
            if (serverSideEncryptionConfig) {
                return kms.createCipherBundle(
                    serverSideEncryptionConfig, log, next);
            }
            return next(null, null);
        },
        function objectCreateAndStore(cipherBundle, next) {
            const objectLockValidationError
                = validateHeaders(bucket, headers, log);
            if (objectLockValidationError) {
                return next(objectLockValidationError);
            }
            writeContinue(request, request._response);
            return createAndStoreObject(bucketName,
                bucket, objectKey, objMD, authInfo, canonicalID, cipherBundle,
                request, false, streamingV4Params, log, next);
        },
    ], (err, storingResult) => {
        if (err) {
            return callback(err, responseHeaders);
        }
        const newByteLength = parsedContentLength;

        setExpirationHeaders(responseHeaders, {
            lifecycleConfig: bucket.getLifecycleConfiguration(),
            objectParams: {
                key: objectKey,
                date: storingResult.lastModified,
                tags: storingResult.tags,
            },
        });

        // Utapi expects null or a number for oldByteLength:
        // * null - new object
        // * 0 or > 0 - existing object with content-length 0 or > 0
        // objMD here is the master version that we would
        // have overwritten if there was an existing version or object
        //
        // TODO: Handle utapi metrics for null version overwrites.
        const oldByteLength = objMD && objMD['content-length']
            !== undefined ? objMD['content-length'] : null;
        if (storingResult) {
            // ETag's hex should always be enclosed in quotes
            responseHeaders.ETag = `"${storingResult.contentMD5}"`;
        }
        const vcfg = bucket.getVersioningConfiguration();
        const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
        if (isVersionedObj) {
            if (storingResult && storingResult.versionId) {
                responseHeaders['x-amz-version-id'] =
                    versionIdUtils.encode(storingResult.versionId,
                        config.versionIdEncodingType);
            }
        }

        // Only pre-existing non-versioned objects get 0 all others use 1
        const numberOfObjects = !isVersionedObj && oldByteLength !== null ? 0 : 1;

        // only the bucket owner's metrics should be updated, regardless of
        // who the requester is
        pushMetric('putObject', log, {
            authInfo,
            canonicalID: bucket.getOwner(),
            bucket: bucketName,
            keys: [objectKey],
            newByteLength,
            oldByteLength: isVersionedObj ? null : oldByteLength,
            versionId: isVersionedObj && storingResult ? storingResult.versionId : undefined,
            location: bucket.getLocationConstraint(),
            numberOfObjects,
        });
        return callback(null, responseHeaders);
    });
}

module.exports = objectPut;
@@ -99,7 +99,7 @@ function objectPutACL(authInfo, request, log, cb) {
        aclUtils.parseGrant(request.headers['x-amz-grant-read'], 'READ');
    const grantReadACPHeader =
        aclUtils.parseGrant(request.headers['x-amz-grant-read-acp'],
            'READ_ACP');
    const grantWriteACPHeader = aclUtils.parseGrant(
        request.headers['x-amz-grant-write-acp'], 'WRITE_ACP');
    const grantFullControlHeader = aclUtils.parseGrant(

@@ -119,7 +119,7 @@ function objectPutACL(authInfo, request, log, cb) {
            }
            if (objectMD.isDeleteMarker) {
                log.trace('delete marker detected',
                    { method: 'objectPutACL' });
                return next(errors.MethodNotAllowed, bucket);
            }
            return next(null, bucket, objectMD);

@@ -202,7 +202,7 @@ function objectPutACL(authInfo, request, log, cb) {
            if (!skip && granteeType === 'Group') {
                if (possibleGroups.indexOf(grantee.URI[0]) < 0) {
                    log.trace('invalid user group',
                        { userGroup: grantee.URI[0] });
                    return next(errors.InvalidArgument, bucket);
                }
                return usersIdentifiedByGroup.push({

@@ -218,20 +218,20 @@ function objectPutACL(authInfo, request, log, cb) {
            // through the access headers
            const allGrantHeaders =
                [].concat(grantReadHeader,
                    grantReadACPHeader, grantWriteACPHeader,
                    grantFullControlHeader);

            usersIdentifiedByEmail = allGrantHeaders.filter(item =>
                item && item.userIDType.toLowerCase() === 'emailaddress');
            usersIdentifiedByGroup = allGrantHeaders
                .filter(itm => itm && itm.userIDType
                    .toLowerCase() === 'uri');
            for (let i = 0; i < usersIdentifiedByGroup.length; i++) {
                if (possibleGroups.indexOf(
                    usersIdentifiedByGroup[i].identifier) < 0) {
                    log.trace('invalid user group',
                        { userGroup: usersIdentifiedByGroup[i]
                            .identifier });
                    return next(errors.InvalidArgument, bucket);
                }
            }
@@ -131,10 +131,10 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            }
            if (sourceObjMD.isDeleteMarker) {
                log.debug('delete marker on source object',
                    { sourceObject });
                if (reqVersionId) {
                    const err = errors.InvalidRequest
                        .customizeDescription('The source of a copy ' +
                        'request may not specifically refer to a delete' +
                        'marker by version id.');
                    return next(err, destBucketMD);

@@ -146,8 +146,8 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            }
            const headerValResult =
                validateHeaders(request.headers,
                    sourceObjMD['last-modified'],
                    sourceObjMD['content-md5']);
            if (headerValResult.error) {
                return next(errors.PreconditionFailed, destBucketMD);
            }

@@ -182,7 +182,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            sourceLocationConstraintName, next) {
            return metadata.getBucket(mpuBucketName, log,
                (err, mpuBucket) => {
                    if (err?.is.NoSuchBucket) {
                        return next(errors.NoSuchUpload);
                    }
                    if (err) {

@@ -209,51 +209,51 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            const mpuOverviewKey =
                `overview${splitter}${destObjectKey}${splitter}${uploadId}`;
            return metadata.getObjectMD(mpuBucketName, mpuOverviewKey,
                null, log, (err, res) => {
                    if (err) {
                        if (err.is.NoSuchKey) {
                            return next(errors.NoSuchUpload);
                        }
                        log.error('error getting overview object from ' +
                            'mpu bucket', {
                            error: err,
                            method: 'objectPutCopyPart::' +
                                'metadata.getObjectMD',
                        });
                        return next(err);
                    }
                    const initiatorID = res.initiator.ID;
                    const requesterID = authInfo.isRequesterAnIAMUser() ?
                        authInfo.getArn() : authInfo.getCanonicalID();
                    if (initiatorID !== requesterID) {
                        return next(errors.AccessDenied);
                    }
                    const destObjLocationConstraint =
                        res.controllingLocationConstraint;
                    return next(null, dataLocator, destBucketMD,
                        destObjLocationConstraint, copyObjectSize,
                        sourceVerId, sourceLocationConstraintName, splitter);
                });
        },
        function goGetData(dataLocator, destBucketMD,
            destObjLocationConstraint, copyObjectSize, sourceVerId,
            sourceLocationConstraintName, splitter, next) {
            data.uploadPartCopy(request, log, destBucketMD,
                sourceLocationConstraintName,
                destObjLocationConstraint, dataLocator, dataStoreContext,
                (error, eTag, lastModified, serverSideEncryption, locations) => {
                    if (error) {
                        if (error.message === 'skip') {
                            return next(skipError, destBucketMD, eTag,
                                lastModified, sourceVerId,
                                serverSideEncryption, lastModified, splitter);
                        }
                        return next(error, destBucketMD);
                    }
                    return next(null, destBucketMD, locations, eTag,
                        copyObjectSize, sourceVerId, serverSideEncryption,
                        lastModified, splitter);
                });
        },
        function getExistingPartInfo(destBucketMD, locations, totalHash,
            copyObjectSize, sourceVerId, serverSideEncryption, lastModified,

@@ -263,9 +263,9 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            metadata.getObjectMD(mpuBucketName, partKey, {}, log,
                (err, result) => {
                    // If there is nothing being overwritten just move on
                    if (err && !err.is.NoSuchKey) {
                        log.debug('error getting current part (if any)',
                            { error: err });
                        return next(err);
                    }
                    let oldLocations;

@@ -299,7 +299,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
                locations, metaStoreParams, log, err => {
                    if (err) {
                        log.debug('error storing new metadata',
                            { error: err, method: 'storeNewPartMetadata' });
                        return next(err);
                    }
                    return next(null, locations, oldLocations, destBucketMD, totalHash,

@@ -377,7 +377,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            request.method, destBucketMD);
        if (err && err !== skipError) {
            log.trace('error from copy part waterfall',
                { error: err });
            return callback(err, null, corsHeaders);
        }
        const xml = [
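The recurring semantic change in the hunks above replaces truthy property checks such as `err && err.NoSuchBucket` or `err.NoSuchKey` with lookups on the error's `is` map, e.g. `err?.is.NoSuchBucket`. A minimal sketch of the pattern, assuming an Arsenal-style error object that exposes an `is` map; the helper name below is hypothetical and not part of this diff:

    const { errors } = require('arsenal');

    // Hypothetical helper illustrating the two error-check styles.
    function getUploadOverview(metadata, mpuBucketName, mpuOverviewKey, log, next) {
        return metadata.getObjectMD(mpuBucketName, mpuOverviewKey, null, log,
            (err, res) => {
                // old style: if (err && err.NoSuchKey) { ... }
                // new style: the `is` map carries one boolean per error name,
                // and optional chaining covers the "no error" case
                if (err?.is.NoSuchKey) {
                    return next(errors.NoSuchUpload);
                }
                if (err) {
                    return next(err);
                }
                return next(null, res);
            });
    }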
@@ -47,33 +47,33 @@ function objectPutLegalHold(authInfo, request, log, callback) {

    return async.waterfall([
        next => metadataValidateBucketAndObj(metadataValParams, log,
            (err, bucket, objectMD) => {
                if (err) {
                    log.trace('request authorization failed',
                        { method: 'objectPutLegalHold', error: err });
                    return next(err);
                }
                if (!objectMD) {
                    const err = versionId ? errors.NoSuchVersion :
                        errors.NoSuchKey;
                    log.trace('error no object metadata found',
                        { method: 'objectPutLegalHold', error: err });
                    return next(err, bucket);
                }
                if (objectMD.isDeleteMarker) {
                    log.trace('version is a delete marker',
                        { method: 'objectPutLegalHold' });
                    return next(errors.MethodNotAllowed, bucket);
                }
                if (!bucket.isObjectLockEnabled()) {
                    log.trace('object lock not enabled on bucket',
                        { method: 'objectPutLegalHold' });
                    return next(errors.InvalidRequest.customizeDescription(
                        'Bucket is missing Object Lock Configuration',
                    ), bucket);
                }
                return next(null, bucket, objectMD);
            }),
        (bucket, objectMD, next) => {
            log.trace('parsing legal hold');
            parseLegalHoldXml(request.post, log, (err, res) =>
@@ -94,7 +94,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
        // Get the destination bucket.
        next => metadata.getBucket(bucketName, log,
            (err, destinationBucket) => {
                if (err?.is.NoSuchBucket) {
                    return next(errors.NoSuchBucket, destinationBucket);
                }
                if (err) {

@@ -128,8 +128,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
                if (err) {
                    log.error('error processing the cipher bundle for ' +
                        'the destination bucket', {
                        error: err,
                    });
                    return next(err, destinationBucket);
                }
                return next(null, destinationBucket, res);

@@ -141,24 +141,24 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
        // Get the MPU shadow bucket.
        (destinationBucket, cipherBundle, next) =>
            metadata.getBucket(mpuBucketName, log,
                (err, mpuBucket) => {
                    if (err?.is.NoSuchBucket) {
                        return next(errors.NoSuchUpload, destinationBucket);
                    }
                    if (err) {
                        log.error('error getting the shadow mpu bucket', {
                            error: err,
                            method: 'objectPutPart::metadata.getBucket',
                        });
                        return next(err, destinationBucket);
                    }
                    let splitter = constants.splitter;
                    // BACKWARD: Remove to remove the old splitter
                    if (mpuBucket.getMdBucketModelVersion() < 2) {
                        splitter = constants.oldSplitter;
                    }
                    return next(null, destinationBucket, cipherBundle, splitter);
                }),
        // Check authorization of the MPU shadow bucket.
        (destinationBucket, cipherBundle, splitter, next) => {
            const mpuOverviewKey = _getOverviewKey(splitter, objectKey,

@@ -189,7 +189,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
        // If data backend is backend that handles mpu (like real AWS),
        // no need to store part info in metadata
        (destinationBucket, objectLocationConstraint, cipherBundle,
            splitter, next) => {
            if (config.backends.data === 'multiple') {
                // if mpu was initiated in legacy version
                if (objectLocationConstraint === undefined) {

@@ -204,45 +204,45 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
                    objectLocationConstraint = backendInfoObj.controllingLC;
                }
                if (!multipleBackendGateway.isClientHandleMpu(
                    objectLocationConstraint)) {
                    // if data backend doesn't handle MPU, continue waterfall
                    return next(null, destinationBucket,
                        objectLocationConstraint, cipherBundle, splitter, null);
                }
                writeContinue(request, request._response);
                return multipleBackendGateway.uploadPart(request,
                    streamingV4Params, null, size, objectLocationConstraint,
                    objectKey, uploadId, partNumber, bucketName, log,
                    (err, partInfo) => {
                        if (err) {
                            log.error('error putting part to data backend', {
                                error: err,
                                method:
                                    'objectPutPart::multipleBackendGateway.uploadPart',
                            });
                            return next(err, destinationBucket);
                        } else if (partInfo &&
                            partInfo.dataStoreType === 'aws_s3') {
                            // if data backend handles MPU, skip to end of waterfall
                            return next(skipError, destinationBucket,
                                partInfo.dataStoreETag);
                        } else if (partInfo && partInfo.dataStoreType === 'azure') {
                            return next(null, destinationBucket,
                                objectLocationConstraint, cipherBundle, splitter,
                                partInfo);
                        }
                        let msg = 'backend is managing MPU but was';
                        msg += ' not handle after uploadPart';
                        log.error(msg, {
                            error: errors.InternalError,
                            method:
                                'objectPutPart::multipleBackendGateway.uploadPart',
                        });
                        return next(errors.InternalError, destinationBucket);
                    });
            }
            return next(null, destinationBucket, objectLocationConstraint,
                cipherBundle, splitter, null);
        },
        // Get any pre-existing part.
        (destinationBucket, objectLocationConstraint, cipherBundle,

@@ -252,7 +252,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
            return metadata.getObjectMD(mpuBucketName, partKey, {}, log,
                (err, res) => {
                    // If there is no object with the same key, continue.
                    if (err && !err.is.NoSuchKey) {
                        log.error('error getting current part (if any)', {
                            error: err,
                            method: 'objectPutPart::metadata.getObjectMD',

@@ -278,14 +278,14 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
        },
        // Store in data backend.
        (destinationBucket, objectLocationConstraint, cipherBundle,
            partKey, prevObjectSize, oldLocations, partInfo, splitter, next) => {
            // NOTE: set oldLocations to null so we do not batchDelete for now
            if (partInfo && partInfo.dataStoreType === 'azure') {
                // skip to storing metadata
                return next(null, destinationBucket, partInfo,
                    partInfo.dataStoreETag,
                    cipherBundle, partKey, prevObjectSize, null,
                    objectLocationConstraint, splitter);
            }
            const objectContext = {
                bucketName,

@@ -311,7 +311,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
        // Store data locations in metadata and delete any overwritten
        // data if completeMPU hasn't been initiated yet.
        (destinationBucket, dataGetInfo, hexDigest, cipherBundle, partKey,
            prevObjectSize, oldLocations, objectLocationConstraint, splitter, next) => {
            // Use an array to be consistent with objectPutCopyPart where there
            // could be multiple locations.
            const partLocations = [dataGetInfo];

@@ -346,7 +346,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
            });
        },
        (partLocations, oldLocations, objectLocationConstraint, destinationBucket,
            hexDigest, prevObjectSize, splitter, next) => {
            if (!oldLocations) {
                return next(null, oldLocations, objectLocationConstraint,
                    destinationBucket, hexDigest, prevObjectSize);
@@ -50,33 +50,33 @@ function objectPutRetention(authInfo, request, log, callback) {

    return async.waterfall([
        next => metadataValidateBucketAndObj(metadataValParams, log,
            (err, bucket, objectMD) => {
                if (err) {
                    log.trace('request authorization failed',
                        { method: 'objectPutRetention', error: err });
                    return next(err);
                }
                if (!objectMD) {
                    const err = reqVersionId ? errors.NoSuchVersion :
                        errors.NoSuchKey;
                    log.trace('error no object metadata found',
                        { method: 'objectPutRetention', error: err });
                    return next(err, bucket);
                }
                if (objectMD.isDeleteMarker) {
                    log.trace('version is a delete marker',
                        { method: 'objectPutRetention' });
                    return next(errors.MethodNotAllowed, bucket);
                }
                if (!bucket.isObjectLockEnabled()) {
                    log.trace('object lock not enabled on bucket',
                        { method: 'objectPutRetention' });
                    return next(errors.InvalidRequest.customizeDescription(
                        'Bucket is missing Object Lock Configuration',
                    ), bucket);
                }
                return next(null, bucket, objectMD);
            }),
        (bucket, objectMD, next) => {
            log.trace('parsing retention information');
            parseRetentionXml(request.post, log,
@@ -49,30 +49,30 @@ function objectPutTagging(authInfo, request, log, callback) {

    return async.waterfall([
        next => metadataValidateBucketAndObj(metadataValParams, log,
            (err, bucket, objectMD) => {
                if (err) {
                    log.trace('request authorization failed',
                        { method: 'objectPutTagging', error: err });
                    return next(err);
                }
                if (!objectMD) {
                    const err = reqVersionId ? errors.NoSuchVersion :
                        errors.NoSuchKey;
                    log.trace('error no object metadata found',
                        { method: 'objectPutTagging', error: err });
                    return next(err, bucket);
                }
                if (objectMD.isDeleteMarker) {
                    log.trace('version is a delete marker',
                        { method: 'objectPutTagging' });
                    return next(errors.MethodNotAllowed, bucket);
                }
                return next(null, bucket, objectMD);
            }),
        (bucket, objectMD, next) => {
            log.trace('parsing tag(s)');
            parseTagXml(request.post, log, (err, tags) =>
                next(err, bucket, tags, objectMD));
        },
        (bucket, tags, objectMD, next) => {
            // eslint-disable-next-line no-param-reassign

@@ -87,13 +87,13 @@ function objectPutTagging(authInfo, request, log, callback) {
                    objectMD.replicationInfo, replicationInfo);
            }
            metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params,
                log, err =>
                    next(err, bucket, objectMD));
        },
        (bucket, objectMD, next) => {
            if (config.backends.data === 'multiple') {
                return multipleBackendGateway.objectTagging('Put', objectKey,
                    bucket, objectMD, log, err => next(err, bucket, objectMD));
            }
            return next(null, bucket, objectMD);
        },
@@ -35,7 +35,7 @@ function generateXml(xml, owner, userBuckets, splitter) {
            `<Name>${key}</Name>`,
            `<CreationDate>${bucket.value.creationDate}` +
                '</CreationDate>',
            '</Bucket>',
        );
    });
    xml.push('</Buckets></ListAllMyBucketsResult>');

@@ -68,7 +68,7 @@ function serviceGet(authInfo, request, log, callback) {
        `<DisplayName>${authInfo.getAccountDisplayName()}` +
            '</DisplayName>',
        '</Owner>',
        '<Buckets>',
    );
    return services.getService(authInfo, request, log, constants.splitter,
        (err, userBuckets, splitter) => {
@@ -100,7 +100,7 @@ function websiteGet(request, log, callback) {
        const websiteConfig = bucket.getWebsiteConfiguration();
        if (!websiteConfig) {
            return callback(errors.NoSuchWebsiteConfiguration, false, null,
                corsHeaders);
        }
        // any errors above would be our own created generic error html
        // if have a website config, error going forward would be user's

@@ -141,19 +141,19 @@ function websiteGet(request, log, callback) {
            // not want to append index key to redirect location
            if (err) {
                log.trace('error retrieving object metadata',
                    { error: err });
                let returnErr = err;
                const bucketAuthorized = isBucketAuthorized(bucket,
                    'bucketGet', constants.publicId, null, log, request);
                // if index object does not exist and bucket is private AWS
                // returns 403 - AccessDenied error.
                if (err.is.NoSuchKey && !bucketAuthorized) {
                    returnErr = errors.AccessDenied;
                }
                return _errorActions(returnErr,
                    websiteConfig.getErrorDocument(), routingRules,
                    bucket, reqObjectKey, corsHeaders, log,
                    callback);
            }
            if (!isObjAuthorized(bucket, objMD, 'objectGet',
                constants.publicId, null, log, request)) {
|
@ -101,13 +101,13 @@ function websiteHead(request, log, callback) {
|
|||
// not want to append index key to redirect location
|
||||
if (err) {
|
||||
log.trace('error retrieving object metadata',
|
||||
{ error: err });
|
||||
{ error: err });
|
||||
let returnErr = err;
|
||||
const bucketAuthorized = isBucketAuthorized(bucket,
|
||||
'bucketGet', constants.publicId, null, log, request);
|
||||
'bucketGet', constants.publicId, null, log, request);
|
||||
// if index object does not exist and bucket is private AWS
|
||||
// returns 403 - AccessDenied error.
|
||||
if (err === errors.NoSuchKey && !bucketAuthorized) {
|
||||
if (err.is.NoSuchKey && !bucketAuthorized) {
|
||||
returnErr = errors.AccessDenied;
|
||||
}
|
||||
return _errorActions(returnErr, routingRules,
|
||||
|
|
|
@ -283,7 +283,7 @@ class V4Transform extends Transform {
|
|||
}
|
||||
// get next chunk
|
||||
return callback();
|
||||
}
|
||||
},
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -9,7 +9,7 @@ const { config } = require('../../Config');

const missingVerIdInternalError = errors.InternalError.customizeDescription(
    'Invalid state. Please ensure versioning is enabled ' +
    'in AWS for the location constraint and try again.',
);

class AwsClient {

@@ -32,23 +32,23 @@ class AwsClient {
    }
    put(stream, size, keyContext, reqUids, callback) {
        const awsKey = this._createAwsKey(keyContext.bucketName,
            keyContext.objectKey, this._bucketMatch);
        const metaHeaders = trimXMetaPrefix(keyContext.metaHeaders);
        const log = createLogger(reqUids);

        const putCb = (err, data) => {
            if (err) {
                logHelper(log, 'error', 'err from data backend',
                    err, this._dataStoreName);
                return callback(errors.ServiceUnavailable
                    .customizeDescription('Error returned from ' +
                    `AWS: ${err.message}`),
                );
            }
            if (!data.VersionId) {
                logHelper(log, 'error', 'missing version id for data ' +
                    'backend object', missingVerIdInternalError,
                    this._dataStoreName);
                return callback(missingVerIdInternalError);
            }
            const dataStoreVersionId = data.VersionId;

@@ -105,15 +105,15 @@ class AwsClient {
                'from datastore', err, this._dataStoreName);
            if (err.code === 'NotFound') {
                const error = errors.ServiceUnavailable
                    .customizeDescription(
                        'Unexpected error from AWS: "NotFound". Data on AWS ' +
                        'may have been altered outside of CloudServer.',
                    );
                return callback(error);
            }
            return callback(errors.ServiceUnavailable
                .customizeDescription('Error returned from ' +
                `AWS: ${err.message}`),
            );
        }
        return callback();

@@ -129,11 +129,11 @@ class AwsClient {
            Range: range ? `bytes=${range[0]}-${range[1]}` : null,
        }).on('success', response => {
            log.trace('AWS GET request response headers',
                { responseHeaders: response.httpResponse.headers });
        });
        const stream = request.createReadStream().on('error', err => {
            logHelper(log, 'error', 'error streaming data from AWS',
                err, this._dataStoreName);
            return callback(err);
        });
        return callback(null, stream);

@@ -159,8 +159,8 @@ class AwsClient {
                return callback();
            }
            return callback(errors.ServiceUnavailable
                .customizeDescription('Error returned from ' +
                `AWS: ${err.message}`),
            );
        }
        return callback();

@@ -170,33 +170,33 @@ class AwsClient {
    healthcheck(location, callback) {
        const awsResp = {};
        this._client.headBucket({ Bucket: this._awsBucketName },
            err => {
                /* eslint-disable no-param-reassign */
                if (err) {
                    awsResp[location] = { error: err, external: true };
                    return callback(null, awsResp);
                }
                return this._client.getBucketVersioning({
                    Bucket: this._awsBucketName },
                    (err, data) => {
                        if (err) {
                            awsResp[location] = { error: err, external: true };
                        } else if (!data.Status ||
                            data.Status === 'Suspended') {
                            awsResp[location] = {
                                versioningStatus: data.Status,
                                error: 'Versioning must be enabled',
                                external: true,
                            };
                        } else {
                            awsResp[location] = {
                                versioningStatus: data.Status,
                                message: 'Congrats! You own the bucket',
                            };
                        }
                        return callback(null, awsResp);
                    });
            });
    }

    createMPU(key, metaHeaders, bucketName, websiteRedirectHeader, contentType,

@@ -228,10 +228,10 @@ class AwsClient {
        return this._client.createMultipartUpload(params, (err, mpuResObj) => {
            if (err) {
                logHelper(log, 'error', 'err from data backend',
                    err, this._dataStoreName);
                return callback(errors.ServiceUnavailable
                    .customizeDescription('Error returned from ' +
                    `AWS: ${err.message}`),
                );
            }
            return callback(null, mpuResObj);

@@ -239,7 +239,7 @@ class AwsClient {
    }

    uploadPart(request, streamingV4Params, stream, size, key, uploadId,
        partNumber, bucketName, log, callback) {
        let hashedStream = stream;
        if (request) {
            const partStream = prepareStream(request, streamingV4Params,

@@ -258,8 +258,8 @@ class AwsClient {
                logHelper(log, 'error', 'err from data backend ' +
                    'on uploadPart', err, this._dataStoreName);
                return callback(errors.ServiceUnavailable
                    .customizeDescription('Error returned from ' +
                    `AWS: ${err.message}`),
                );
            }
            // Because we manually add quotes to ETag later, remove quotes here

@@ -276,7 +276,7 @@ class AwsClient {
    }

    listParts(key, uploadId, bucketName, partNumberMarker, maxParts, log,
        callback) {
        const awsBucket = this._awsBucketName;
        const awsKey = this._createAwsKey(bucketName, key, this._bucketMatch);
        const params = { Bucket: awsBucket, Key: awsKey, UploadId: uploadId,

@@ -284,10 +284,10 @@ class AwsClient {
        return this._client.listParts(params, (err, partList) => {
            if (err) {
                logHelper(log, 'error', 'err from data backend on listPart',
                    err, this._dataStoreName);
                return callback(errors.ServiceUnavailable
                    .customizeDescription('Error returned from ' +
                    `AWS: ${err.message}`),
                );
            }
            // build storedParts object to mimic Scality S3 backend returns

@@ -348,46 +348,46 @@ class AwsClient {
        };
        const completeObjData = { key: awsKey };
        return this._client.completeMultipartUpload(mpuParams,
            (err, completeMpuRes) => {
                if (err) {
                    if (mpuError[err.code]) {
                        logHelper(log, 'trace', 'err from data backend on ' +
                            'completeMPU', err, this._dataStoreName);
                        return callback(errors[err.code]);
                    }
                    logHelper(log, 'error', 'err from data backend on ' +
                        'completeMPU', err, this._dataStoreName);
                    return callback(errors.ServiceUnavailable
                        .customizeDescription('Error returned from ' +
                        `AWS: ${err.message}`),
                    );
                }
                if (!completeMpuRes.VersionId) {
                    logHelper(log, 'error', 'missing version id for data ' +
                        'backend object', missingVerIdInternalError,
                        this._dataStoreName);
                    return callback(missingVerIdInternalError);
                }
                // need to get content length of new object to store
                // in our metadata
                return this._client.headObject({ Bucket: awsBucket, Key: awsKey },
                    (err, objHeaders) => {
                        if (err) {
                            logHelper(log, 'trace', 'err from data backend on ' +
                                'headObject', err, this._dataStoreName);
                            return callback(errors.ServiceUnavailable
                                .customizeDescription('Error returned from ' +
                                `AWS: ${err.message}`),
                            );
                        }
                        // remove quotes from eTag because they're added later
                        completeObjData.eTag = completeMpuRes.ETag
                            .substring(1, completeMpuRes.ETag.length - 1);
                        completeObjData.dataStoreVersionId = completeMpuRes.VersionId;
                        completeObjData.contentLength = objHeaders.ContentLength;
                        return callback(null, completeObjData);
                    });
            });
    }

    abortMPU(key, uploadId, bucketName, log, callback) {

@@ -402,8 +402,8 @@ class AwsClient {
                'the MPU on AWS S3. You should abort directly on AWS S3 ' +
                'using the same uploadId.', err, this._dataStoreName);
            return callback(errors.ServiceUnavailable
                .customizeDescription('Error returned from ' +
                `AWS: ${err.message}`),
            );
        }
        return callback();

@@ -430,8 +430,8 @@ class AwsClient {
            logHelper(log, 'error', 'error from data backend on ' +
                'putObjectTagging', err, this._dataStoreName);
            return callback(errors.ServiceUnavailable
                .customizeDescription('Error returned from ' +
                `AWS: ${err.message}`),
            );
        }
        return callback();

@@ -452,19 +452,19 @@ class AwsClient {
            logHelper(log, 'error', 'error from data backend on ' +
                'deleteObjectTagging', err, this._dataStoreName);
            return callback(errors.ServiceUnavailable
                .customizeDescription('Error returned from ' +
                `AWS: ${err.message}`),
            );
        }
        return callback();
        });
    }
    copyObject(request, destLocationConstraintName, sourceKey,
        sourceLocationConstraintName, storeMetadataParams, log, callback) {
        const destBucketName = request.bucketName;
        const destObjectKey = request.objectKey;
        const destAwsKey = this._createAwsKey(destBucketName, destObjectKey,
            this._bucketMatch);

        const sourceAwsBucketName =
            config.getAwsBucketName(sourceLocationConstraintName);

@@ -489,32 +489,32 @@ class AwsClient {
                    `${sourceAwsBucketName} AWS bucket`, err,
                    this._dataStoreName);
                return callback(errors.AccessDenied
                    .customizeDescription('Error: Unable to access ' +
                    `${sourceAwsBucketName} AWS bucket`),
                );
            }
            logHelper(log, 'error', 'error from data backend on ' +
                'copyObject', err, this._dataStoreName);
            return callback(errors.ServiceUnavailable
                .customizeDescription('Error returned from ' +
                `AWS: ${err.message}`),
            );
        }
        if (!copyResult.VersionId) {
            logHelper(log, 'error', 'missing version id for data ' +
                'backend object', missingVerIdInternalError,
                this._dataStoreName);
            return callback(missingVerIdInternalError);
        }
        return callback(null, destAwsKey, copyResult.VersionId);
        });
    }
    uploadPartCopy(request, awsSourceKey, sourceLocationConstraintName,
        log, callback) {
        const destBucketName = request.bucketName;
        const destObjectKey = request.objectKey;
        const destAwsKey = this._createAwsKey(destBucketName, destObjectKey,
            this._bucketMatch);

        const sourceAwsBucketName =
            config.getAwsBucketName(sourceLocationConstraintName);

@@ -538,15 +538,15 @@ class AwsClient {
                    `${sourceAwsBucketName} AWS bucket`, err,
                    this._dataStoreName);
                return callback(errors.AccessDenied
                    .customizeDescription('Error: Unable to access ' +
                    `${sourceAwsBucketName} AWS bucket`),
                );
            }
            logHelper(log, 'error', 'error from data backend on ' +
                'uploadPartCopy', err, this._dataStoreName);
            return callback(errors.ServiceUnavailable
                .customizeDescription('Error returned from ' +
                `AWS: ${err.message}`),
            );
        }
        const eTag = removeQuotes(res.CopyPartResult.ETag);
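Every backend callback in the hunks above and below funnels SDK failures through the same shape: log through `logHelper`, then return `errors.ServiceUnavailable.customizeDescription(...)` with the backend message appended, now followed by a trailing comma after the final call argument. A minimal sketch of that shape, assuming Arsenal's `errors` object; the helper name is hypothetical and not part of this diff:

    const { errors } = require('arsenal');

    // Hypothetical helper mirroring the error-wrapping shape used by the clients.
    function wrapBackendError(err, backendName, callback) {
        return callback(errors.ServiceUnavailable
            .customizeDescription('Error returned from ' +
                `${backendName}: ${err.message}`),
        );
    }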
|
@ -42,7 +42,7 @@ class AzureClient {
|
|||
if (log) {
|
||||
log.error('error thrown by Azure Storage Client Library',
|
||||
{ error: err.message, stack: err.stack, s3Method,
|
||||
azureMethod, dataStoreName: this._dataStoreName });
|
||||
azureMethod, dataStoreName: this._dataStoreName });
|
||||
}
|
||||
cb(error.customizeDescription('Error from Azure ' +
|
||||
`method: ${azureMethod} on ${s3Method} S3 call: ` +
|
||||
|
@ -83,75 +83,75 @@ class AzureClient {
|
|||
};
|
||||
|
||||
return metadata.listMultipartUploads(mpuBucketName, listingParams,
|
||||
log, (err, mpuList) => {
|
||||
if (err && !err.NoSuchBucket) {
|
||||
log.error('Error listing MPUs for Azure delete',
|
||||
{ error: err, dataStoreName });
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
if (mpuList && mpuList.Uploads && mpuList.Uploads.length > 0) {
|
||||
const error = errors.MPUinProgress;
|
||||
log.error('Error: cannot put/delete object to Azure with ' +
|
||||
log, (err, mpuList) => {
|
||||
if (err && !err.NoSuchBucket) {
|
||||
log.error('Error listing MPUs for Azure delete',
|
||||
{ error: err, dataStoreName });
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
if (mpuList && mpuList.Uploads && mpuList.Uploads.length > 0) {
|
||||
const error = errors.MPUinProgress;
|
||||
log.error('Error: cannot put/delete object to Azure with ' +
|
||||
'same key name as ongoing MPU on Azure',
|
||||
{ error, dataStoreName });
|
||||
return cb(error);
|
||||
}
|
||||
// If listMultipartUploads returns a NoSuchBucket error or the
|
||||
// mpu list is empty, there are no conflicting MPUs, so continue
|
||||
return cb();
|
||||
});
|
||||
return cb(error);
|
||||
}
|
||||
// If listMultipartUploads returns a NoSuchBucket error or the
|
||||
// mpu list is empty, there are no conflicting MPUs, so continue
|
||||
return cb();
|
||||
});
|
||||
}
|
||||
|
||||
put(stream, size, keyContext, reqUids, callback) {
|
||||
const log = createLogger(reqUids);
|
||||
// before blob is put, make sure there is no ongoing MPU with same key
|
||||
this.protectAzureBlocks(keyContext.bucketName,
|
||||
keyContext.objectKey, this._dataStoreName, log, err => {
|
||||
keyContext.objectKey, this._dataStoreName, log, err => {
|
||||
// if error returned, there is ongoing MPU, so do not put
|
||||
if (err) {
|
||||
return callback(err.customizeDescription(
|
||||
`Error putting object to Azure: ${err.message}`));
|
||||
}
|
||||
const azureKey = this._createAzureKey(keyContext.bucketName,
|
||||
keyContext.objectKey, this._bucketMatch);
|
||||
const options = {
|
||||
metadata: translateAzureMetaHeaders(keyContext.metaHeaders,
|
||||
keyContext.tagging),
|
||||
contentSettings: {
|
||||
contentType: keyContext.contentType || undefined,
|
||||
cacheControl: keyContext.cacheControl || undefined,
|
||||
contentDisposition: keyContext.contentDisposition ||
|
||||
if (err) {
|
||||
return callback(err.customizeDescription(
|
||||
`Error putting object to Azure: ${err.message}`));
|
||||
}
|
||||
const azureKey = this._createAzureKey(keyContext.bucketName,
|
||||
keyContext.objectKey, this._bucketMatch);
|
||||
const options = {
|
||||
metadata: translateAzureMetaHeaders(keyContext.metaHeaders,
|
||||
keyContext.tagging),
|
||||
contentSettings: {
|
||||
contentType: keyContext.contentType || undefined,
|
||||
cacheControl: keyContext.cacheControl || undefined,
|
||||
contentDisposition: keyContext.contentDisposition ||
|
||||
undefined,
|
||||
contentEncoding: keyContext.contentEncoding || undefined,
|
||||
},
|
||||
};
|
||||
if (size === 0) {
|
||||
return this._errorWrapper('put', 'createBlockBlobFromText',
|
||||
[this._azureContainerName, azureKey, '', options,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure PUT data ' +
|
||||
contentEncoding: keyContext.contentEncoding || undefined,
|
||||
},
|
||||
};
|
||||
if (size === 0) {
|
||||
return this._errorWrapper('put', 'createBlockBlobFromText',
|
||||
[this._azureContainerName, azureKey, '', options,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure PUT data ' +
|
||||
'backend', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`Azure: ${err.message}`));
|
||||
}
|
||||
return callback(null, azureKey);
|
||||
}], log, callback);
|
||||
}
|
||||
return this._errorWrapper('put', 'createBlockBlobFromStream',
|
||||
[this._azureContainerName, azureKey, stream, size, options,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure PUT data ' +
|
||||
}
|
||||
return callback(null, azureKey);
|
||||
}], log, callback);
|
||||
}
|
||||
return this._errorWrapper('put', 'createBlockBlobFromStream',
|
||||
[this._azureContainerName, azureKey, stream, size, options,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure PUT data ' +
|
||||
'backend', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`Azure: ${err.message}`));
|
||||
}
|
||||
return callback(null, azureKey);
|
||||
}], log, callback);
|
||||
});
|
||||
}
|
||||
return callback(null, azureKey);
|
||||
}], log, callback);
|
||||
});
|
||||
}
|
||||
|
||||
head(objectGetInfo, reqUids, callback) {
|
||||
|
@ -159,24 +159,24 @@ class AzureClient {
|
|||
const { key, azureStreamingOptions } = objectGetInfo;
|
||||
return this._errorWrapper('head', 'getBlobProperties',
|
||||
[this._azureContainerName, key, azureStreamingOptions,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure HEAD data backend',
|
||||
err, this._dataStoreName);
|
||||
if (err.code === 'NotFound') {
|
||||
const error = errors.ServiceUnavailable
|
||||
.customizeDescription(
|
||||
'Unexpected error from Azure: "NotFound". Data ' +
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure HEAD data backend',
|
||||
err, this._dataStoreName);
|
||||
if (err.code === 'NotFound') {
|
||||
const error = errors.ServiceUnavailable
|
||||
.customizeDescription(
|
||||
'Unexpected error from Azure: "NotFound". Data ' +
|
||||
'on Azure may have been altered outside of ' +
|
||||
'CloudServer.');
|
||||
return callback(error);
|
||||
}
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
return callback(error);
|
||||
}
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`Azure: ${err.message}`));
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}
|
||||
|
||||
get(objectGetInfo, range, reqUids, callback) {
|
||||
|
@ -195,14 +195,14 @@ class AzureClient {
|
|||
}
|
||||
this._errorWrapper('get', 'getBlobToStream',
|
||||
[this._azureContainerName, key, response, streamingOptions,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure GET data backend',
|
||||
err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
return callback(null, response);
|
||||
}], log, callback);
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure GET data backend',
|
||||
err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
return callback(null, response);
|
||||
}], log, callback);
|
||||
}
|
||||
|
||||
delete(objectGetInfo, reqUids, callback) {
|
||||
|
@ -212,17 +212,17 @@ class AzureClient {
|
|||
objectGetInfo.key;
|
||||
return this._errorWrapper('delete', 'deleteBlobIfExists',
|
||||
[this._azureContainerName, key,
|
||||
err => {
|
||||
if (err) {
|
||||
const log = createLogger(reqUids);
|
||||
logHelper(log, 'error', 'error deleting object from ' +
|
||||
err => {
|
||||
if (err) {
|
||||
const log = createLogger(reqUids);
|
||||
logHelper(log, 'error', 'error deleting object from ' +
|
||||
'Azure datastore', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`Azure: ${err.message}`));
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}
|
||||
|
||||
healthcheck(location, callback, flightCheckOnStartUp) {
|
||||
|
@ -246,7 +246,7 @@ class AzureClient {
|
|||
}
|
||||
|
||||
uploadPart(request, streamingV4Params, partStream, size, key, uploadId,
|
||||
partNumber, bucket, log, callback) {
|
||||
partNumber, bucket, log, callback) {
|
||||
const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
|
||||
const params = { bucketName: this._azureContainerName,
|
||||
partNumber, size, objectKey: azureKey, uploadId };
|
||||
|
@ -274,27 +274,27 @@ class AzureClient {
|
|||
if (size <= azureMpuUtils.maxSubPartSize) {
|
||||
const errorWrapperFn = this._errorWrapper.bind(this);
|
||||
return azureMpuUtils.putSinglePart(errorWrapperFn,
|
||||
stream, params, this._dataStoreName, log, (err, dataStoreETag) => {
|
||||
stream, params, this._dataStoreName, log, (err, dataStoreETag) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
dataRetrievalInfo.dataStoreETag = dataStoreETag;
|
||||
return callback(null, dataRetrievalInfo);
|
||||
});
|
||||
}
|
||||
const errorWrapperFn = this._errorWrapper.bind(this);
|
||||
return azureMpuUtils.putSubParts(errorWrapperFn, stream,
|
||||
params, this._dataStoreName, log, (err, dataStoreETag) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
dataRetrievalInfo.dataStoreETag = dataStoreETag;
|
||||
return callback(null, dataRetrievalInfo);
|
||||
});
|
||||
}
|
||||
const errorWrapperFn = this._errorWrapper.bind(this);
|
||||
return azureMpuUtils.putSubParts(errorWrapperFn, stream,
|
||||
params, this._dataStoreName, log, (err, dataStoreETag) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
dataRetrievalInfo.dataStoreETag = dataStoreETag;
|
||||
return callback(null, dataRetrievalInfo);
|
||||
});
|
||||
}
|
||||
|
||||
completeMPU(jsonList, mdInfo, key, uploadId, bucket, metaHeaders,
|
||||
contentSettings, log, callback) {
|
||||
contentSettings, log, callback) {
|
||||
const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
|
||||
const commitList = {
|
||||
UncommittedBlocks: jsonList.uncommittedBlocks || [],
|
||||
|
@ -319,20 +319,20 @@ class AzureClient {
|
|||
};
|
||||
this._errorWrapper('completeMPU', 'commitBlocks',
|
||||
[this._azureContainerName, azureKey, commitList, options,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err completing MPU on Azure ' +
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err completing MPU on Azure ' +
|
||||
'datastore', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`Azure: ${err.message}`));
|
||||
}
|
||||
const completeObjData = {
|
||||
key: azureKey,
|
||||
filteredPartsObj,
|
||||
};
|
||||
return callback(null, completeObjData);
|
||||
}], log, callback);
|
||||
}
|
||||
const completeObjData = {
|
||||
key: azureKey,
|
||||
filteredPartsObj,
|
||||
};
|
||||
return callback(null, completeObjData);
|
||||
}], log, callback);
|
||||
}
|
||||
|
||||
objectPutTagging(key, bucket, objectMD, log, callback) {
|
||||
|
@ -341,14 +341,14 @@ class AzureClient {
|
|||
azureMD.tags = JSON.stringify(objectMD.tags);
|
||||
this._errorWrapper('objectPutTagging', 'setBlobMetadata',
|
||||
[this._azureContainerName, azureKey, azureMD,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err putting object tags to ' +
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err putting object tags to ' +
|
||||
'Azure backend', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}
|
||||
|
||||
    objectDeleteTagging(key, bucket, objectMD, log, callback) {

@ -356,27 +356,27 @@ class AzureClient {
        const azureMD = this._getMetaHeaders(objectMD);
        this._errorWrapper('objectDeleteTagging', 'setBlobMetadata',
            [this._azureContainerName, azureKey, azureMD,
                err => {
                    if (err) {
                        logHelper(log, 'error', 'err putting object tags to ' +
                            'Azure backend', err, this._dataStoreName);
                        return callback(errors.ServiceUnavailable);
                    }
                    return callback();
                }], log, callback);
    }

    copyObject(request, destLocationConstraintName, sourceKey,
        sourceLocationConstraintName, storeMetadataParams, log, callback) {
        const destContainerName = request.bucketName;
        const destObjectKey = request.objectKey;

        const destAzureKey = this._createAzureKey(destContainerName,
            destObjectKey, this._bucketMatch);

        const sourceContainerName =
            config.locationConstraints[sourceLocationConstraintName]
                .details.azureContainerName;

        let options;
        if (storeMetadataParams.metaHeaders) {

@ -387,7 +387,7 @@ class AzureClient {
        this._errorWrapper('copyObject', 'startCopyBlob',
            [`${this._azureStorageEndpoint}` +
                `${sourceContainerName}/${sourceKey}`,
            this._azureContainerName, destAzureKey, options,
            (err, res) => {
                if (err) {
                    if (err.code === 'CannotVerifyCopySource') {

@ -395,36 +395,36 @@ class AzureClient {
                        `${sourceContainerName} Azure Container`, err,
                            this._dataStoreName);
                        return callback(errors.AccessDenied
                            .customizeDescription('Error: Unable to access ' +
                            `${sourceContainerName} Azure Container`),
                        );
                    }
                    logHelper(log, 'error', 'error from data backend on ' +
                        'copyObject', err, this._dataStoreName);
                    return callback(errors.ServiceUnavailable
                        .customizeDescription('Error returned from ' +
                        `AWS: ${err.message}`),
                    );
                }
                if (res.copy.status === 'pending') {
                    logHelper(log, 'error', 'Azure copy status is pending',
                        err, this._dataStoreName);
                    const copyId = res.copy.id;
                    this._client.abortCopyBlob(this._azureContainerName,
                        destAzureKey, copyId, err => {
                            if (err) {
                                logHelper(log, 'error', 'error from data backend ' +
                                    'on abortCopyBlob', err, this._dataStoreName);
                                return callback(errors.ServiceUnavailable
                                    .customizeDescription('Error returned from ' +
                                    `AWS on abortCopyBlob: ${err.message}`),
                                );
                            }
                            return callback(errors.InvalidObjectState
                                .customizeDescription('Error: Azure copy status was ' +
                                'pending. It has been aborted successfully'),
                            );
                        });
                }
                return callback(null, destAzureKey);
            }], log, callback);

@ -14,7 +14,7 @@ const azureHealth = {
const utils = {
    logHelper(log, level, description, error, dataStoreName) {
        log[level](description, { error: error.message,
            errorName: error.name, dataStoreName });
    },
    // take off the 'x-amz-meta-'
    trimXMetaPrefix(obj) {

@ -70,7 +70,7 @@ const utils = {
     * same account since Azure copy outside of an account is async
     */
    externalBackendCopy(locationConstraintSrc, locationConstraintDest,
        sourceBucketMD, destBucketMD) {
        const sourceBucketName = sourceBucketMD.getName();
        const destBucketName = destBucketMD.getName();
        const isSameBucket = sourceBucketName === destBucketName;

@ -86,11 +86,11 @@ const utils = {
            (sourceLocationConstraintType === 'aws_s3' ||
            (sourceLocationConstraintType === 'azure' &&
            config.isSameAzureAccount(locationConstraintSrc,
                locationConstraintDest)));
    },

    checkExternalBackend(clients, locations, type, flightCheckOnStartUp,
        externalBackendHealthCheckInterval, cb) {
        const checkStatus = type === 'aws_s3' ? awsHealth : azureHealth;
        if (locations.length === 0) {
            return process.nextTick(cb, null, []);

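The externalBackendCopy helper above decides whether a copy can be delegated to the external backend itself; as its comment notes, Azure-to-Azure copies only stay synchronous within one account. A minimal, self-contained sketch of that decision (function name and parameters here are illustrative, not the module's API; the real helper also checks the source and destination bucket metadata):

// Illustrative sketch only: same-backend copy eligibility.
function canCopyOnExternalBackend(srcType, destType, sameAzureAccount) {
    if (srcType !== destType) {
        return false;
    }
    if (srcType === 'aws_s3') {
        // AWS-to-AWS copies can always be delegated to the backend.
        return true;
    }
    // Azure-to-Azure copies are synchronous only within one account.
    return srcType === 'azure' && sameAzureAccount;
}

// canCopyOnExternalBackend('azure', 'azure', true) === true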
@ -32,16 +32,16 @@ const backend = {
            }
            cursor += data.length;
        })
        .on('end', () => {
            if (exceeded) {
                log.error('data stream exceed announced size',
                    { size, overflow: cursor });
                callback(errors.InternalError);
            } else {
                ds[count] = { value, keyContext };
                callback(null, count++);
            }
        });
    },

    get: function getMem(objectGetInfo, range, reqUids, callback) {

@ -19,14 +19,14 @@ config.on('location-constraints-update', () => {
const multipleBackendGateway = {

    put: (hashedStream, size, keyContext,
        backendInfo, reqUids, callback) => {
        const controllingLocationConstraint =
            backendInfo.getControllingLocationConstraint();
        const client = clients[controllingLocationConstraint];
        if (!client) {
            const log = createLogger(reqUids);
            log.error('no data backend matching controlling locationConstraint',
                { controllingLocationConstraint });
            return process.nextTick(() => {
                callback(errors.InternalError);
            });
@ -55,7 +55,7 @@ const multipleBackendGateway = {
        log.debug('put to location', { controllingLocationConstraint });
        if (err) {
            log.error('error from datastore',
                { error: err, dataStoreType: client.clientType });
            return callback(errors.ServiceUnavailable);
        }
        const dataRetrievalInfo = {
@ -156,29 +156,29 @@ const multipleBackendGateway = {
        }, () => {
            async.parallel([
                next => checkExternalBackend(
                    clients, awsArray, 'aws_s3', flightCheckOnStartUp,
                    externalBackendHealthCheckInterval, next),
                next => checkExternalBackend(
                    clients, azureArray, 'azure', flightCheckOnStartUp,
                    externalBackendHealthCheckInterval, next),
            ], (errNull, externalResp) => {
                const externalLocResults = [];
                externalResp.forEach(resp => externalLocResults.push(...resp));
                externalLocResults.forEach(locationResult =>
                    Object.assign(multBackendResp, locationResult));
                callback(null, multBackendResp);
            });
        });
    },

    createMPU: (key, metaHeaders, bucketName, websiteRedirectHeader,
        location, contentType, cacheControl, contentDisposition,
        contentEncoding, tagging, log, cb) => {
        const client = clients[location];
        if (client.clientType === 'aws_s3') {
            return client.createMPU(key, metaHeaders, bucketName,
                websiteRedirectHeader, contentType, cacheControl,
                contentDisposition, contentEncoding, tagging, log, cb);
        }
        return cb();
    },

@ -192,18 +192,18 @@ const multipleBackendGateway = {
    },

    uploadPart: (request, streamingV4Params, stream, size, location, key,
        uploadId, partNumber, bucketName, log, cb) => {
        const client = clients[location];

        if (client.uploadPart) {
            return client.uploadPart(request, streamingV4Params, stream, size,
                key, uploadId, partNumber, bucketName, log, cb);
        }
        return cb();
    },

    listParts: (key, uploadId, location, bucketName, partNumberMarker, maxParts,
        log, cb) => {
        const client = clients[location];

        if (client.listParts) {
@ -214,7 +214,7 @@ const multipleBackendGateway = {
    },

    completeMPU: (key, uploadId, location, jsonList, mdInfo, bucketName,
        userMetadata, contentSettings, log, cb) => {
        const client = clients[location];
        if (client.completeMPU) {
            const args = [jsonList, mdInfo, key, uploadId, bucketName];
@ -262,42 +262,42 @@ const multipleBackendGateway = {
    // NOTE: using copyObject only if copying object from one external
    // backend to the same external backend
    copyObject: (request, destLocationConstraintName, externalSourceKey,
        sourceLocationConstraintName, storeMetadataParams, log, cb) => {
        const client = clients[destLocationConstraintName];
        if (client.copyObject) {
            return client.copyObject(request, destLocationConstraintName,
                externalSourceKey, sourceLocationConstraintName,
                storeMetadataParams, log, (err, key, dataStoreVersionId) => {
                    const dataRetrievalInfo = {
                        key,
                        dataStoreName: destLocationConstraintName,
                        dataStoreType: client.clientType,
                        dataStoreVersionId,
                    };
                    cb(err, dataRetrievalInfo);
                });
        }
        return cb(errors.NotImplemented
            .customizeDescription('Can not copy object from ' +
            `${client.clientType} to ${client.clientType}`));
    },
    uploadPartCopy: (request, location, awsSourceKey,
        sourceLocationConstraintName, log, cb) => {
        const client = clients[location];
        if (client.uploadPartCopy) {
            return client.uploadPartCopy(request, awsSourceKey,
                sourceLocationConstraintName,
                log, cb);
        }
        return cb(errors.NotImplemented.customizeDescription(
            'Can not copy object from ' +
            `${client.clientType} to ${client.clientType}`));
    },
    protectAzureBlocks: (bucketName, objectKey, location, log, cb) => {
        const client = clients[location];
        if (client.protectAzureBlocks) {
            return client.protectAzureBlocks(bucketName, objectKey, location,
                log, cb);
        }
        return cb();
    },

@ -1,26 +1,18 @@
const async = require('async');
const { errors, s3middleware } = require('arsenal');
const PassThrough = require('stream').PassThrough;
const { storage } = require('arsenal');

const DataFileInterface = require('./file/backend');
const inMemory = require('./in_memory/backend').backend;
const locationConstraintCheck =
    require('../api/apiUtils/object/locationConstraintCheck');
const multipleBackendGateway = require('./multipleBackendGateway');
const utils = require('./external/utils');
const { config } = require('../Config');
const MD5Sum = s3middleware.MD5Sum;
const NullStream = s3middleware.NullStream;
const assert = require('assert');
const kms = require('../kms/wrapper');
const externalBackends = require('../../constants').externalBackends;
const constants = require('../../constants');
const { BackendInfo } = require('../api/apiUtils/object/BackendInfo');
const RelayMD5Sum = require('../utilities/RelayMD5Sum');
const skipError = new Error('skip');
const metadata = require('../metadata/wrapper');
const vault = require('../auth/vault');
const locationStorageCheck =
    require('../api/apiUtils/object/locationStorageCheck');
const { DataWrapper, MultipleBackendGateway, parseLC } = storage.data;
const { DataFileInterface } = storage.data.file;
const inMemory = storage.data.inMemory.datastore.backend;

let CdmiData;
try {
    // eslint-disable-next-line import/no-unresolved
    CdmiData = require('cdmiclient').CdmiData;
} catch (err) {
    CdmiData = null;

@ -33,10 +25,12 @@ if (config.backends.data === 'mem') {
    client = inMemory;
    implName = 'mem';
} else if (config.backends.data === 'file') {
    client = new DataFileInterface();
    client = new DataFileInterface(config);
    implName = 'file';
} else if (config.backends.data === 'multiple') {
    client = multipleBackendGateway;
    const clients = parseLC(config, vault);
    client = new MultipleBackendGateway(
        clients, metadata, locationStorageCheck);
    implName = 'multipleBackends';
} else if (config.backends.data === 'cdmi') {
    if (!CdmiData) {
@ -52,780 +46,16 @@ if (config.backends.data === 'mem') {
|
|||
implName = 'cdmi';
|
||||
}
|
||||
|
||||
/**
|
||||
* _retryDelete - Attempt to delete key again if it failed previously
|
||||
* @param { string | object } objectGetInfo - either string location of object
|
||||
* to delete or object containing info of object to delete
|
||||
* @param {object} log - Werelogs request logger
|
||||
* @param {number} count - keeps count of number of times function has been run
|
||||
* @param {function} cb - callback
|
||||
* @returns undefined and calls callback
|
||||
*/
|
||||
const MAX_RETRY = 2;
|
||||
|
||||
// This check is done because on a put, complete mpu or copy request to
|
||||
// Azure/AWS, if the object already exists on that backend, the existing object
|
||||
// should not be deleted, which is the functionality for all other backends
|
||||
function _shouldSkipDelete(locations, requestMethod, newObjDataStoreName) {
|
||||
const skipMethods = { PUT: true, POST: true };
|
||||
if (!Array.isArray(locations) || !locations[0] ||
|
||||
!locations[0].dataStoreType) {
|
||||
return false;
|
||||
}
|
||||
const isSkipBackend = externalBackends[locations[0].dataStoreType];
|
||||
const isMatchingBackends =
|
||||
locations[0].dataStoreName === newObjDataStoreName;
|
||||
const isSkipMethod = skipMethods[requestMethod];
|
||||
return (isSkipBackend && isMatchingBackends && isSkipMethod);
|
||||
}
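A hypothetical call to the guard above, assuming 'aws_s3' is flagged in constants.externalBackends as the preceding comment implies (values are placeholders):

// Overwrite PUT that landed on the same AWS location: the data that was
// just replaced on AWS must not be deleted afterwards.
const oldLocations = [
    { key: 'someKey', dataStoreName: 'awsbackend', dataStoreType: 'aws_s3' },
];
const skip = _shouldSkipDelete(oldLocations, 'PUT', 'awsbackend');
// skip === true, so batchDelete() below returns early for this object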
|
||||
|
||||
function _retryDelete(objectGetInfo, log, count, cb) {
|
||||
if (count > MAX_RETRY) {
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
return client.delete(objectGetInfo, log.getSerializedUids(), err => {
|
||||
if (err) {
|
||||
if (err.ObjNotFound) {
|
||||
log.info('no such key in datastore',
|
||||
{ objectGetInfo, implName, moreRetries: 'no' });
|
||||
return cb(err);
|
||||
}
|
||||
log.error('delete error from datastore',
|
||||
{ error: err, implName, moreRetries: 'yes' });
|
||||
return _retryDelete(objectGetInfo, log, count + 1, cb);
|
||||
}
|
||||
return cb();
|
||||
});
|
||||
}
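For illustration, a hypothetical direct call to the retry helper above (the objectGetInfo value and log are placeholders):

// Start at attempt 0; transient errors are retried until MAX_RETRY is
// exceeded, while ObjNotFound is passed straight back without retrying.
_retryDelete({ key: 'someKey', dataStoreName: 'file' }, log, 0, err => {
    if (err) {
        // ObjNotFound, or InternalError once the retry budget is spent
    }
});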
|
||||
|
||||
function _put(cipherBundle, value, valueSize,
|
||||
keyContext, backendInfo, log, cb) {
|
||||
assert.strictEqual(typeof valueSize, 'number');
|
||||
log.debug('sending put to datastore', { implName, keyContext,
|
||||
method: 'put' });
|
||||
let hashedStream = null;
|
||||
if (value) {
|
||||
hashedStream = new MD5Sum();
|
||||
value.pipe(hashedStream);
|
||||
value.once('clientError', () => {
|
||||
log.trace('destroying hashed stream');
|
||||
hashedStream.destroy();
|
||||
});
|
||||
}
|
||||
const data = new DataWrapper(
|
||||
client, implName, config, kms, metadata, locationStorageCheck, vault);
|
||||
|
||||
config.on('location-constraints-update', () => {
|
||||
if (implName === 'multipleBackends') {
|
||||
// Need to send backendInfo to client.put and
|
||||
// client.put will provide dataRetrievalInfo so no
|
||||
// need to construct here
|
||||
/* eslint-disable no-param-reassign */
|
||||
keyContext.cipherBundle = cipherBundle;
|
||||
return client.put(hashedStream,
|
||||
valueSize, keyContext, backendInfo, log.getSerializedUids(),
|
||||
(err, dataRetrievalInfo) => {
|
||||
if (err) {
|
||||
log.error('put error from datastore',
|
||||
{ error: err, implName });
|
||||
if (err.httpCode === 408) {
|
||||
return cb(errors.IncompleteBody);
|
||||
}
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
});
|
||||
const clients = parseLC(config, vault);
|
||||
client = new MultipleBackendGateway(
|
||||
clients, metadata, locationStorageCheck);
|
||||
data.switch(client);
|
||||
}
|
||||
/* eslint-enable no-param-reassign */
|
||||
});
|
||||
|
||||
let writeStream = hashedStream;
|
||||
if (cipherBundle && cipherBundle.cipher) {
|
||||
writeStream = cipherBundle.cipher;
|
||||
hashedStream.pipe(writeStream);
|
||||
}
|
||||
|
||||
return client.put(writeStream, valueSize, keyContext,
|
||||
log.getSerializedUids(), (err, key) => {
|
||||
if (err) {
|
||||
log.error('put error from datastore',
|
||||
{ error: err, implName });
|
||||
if (err.httpCode === 408) {
|
||||
return cb(errors.IncompleteBody);
|
||||
}
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
const dataRetrievalInfo = {
|
||||
key,
|
||||
dataStoreName: implName,
|
||||
};
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
});
|
||||
}
|
||||
|
||||
const data = {
|
||||
put: (cipherBundle, value, valueSize, keyContext, backendInfo, log, cb) => {
|
||||
_put(cipherBundle, value, valueSize, keyContext, backendInfo, log,
|
||||
(err, dataRetrievalInfo, hashedStream) => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
if (hashedStream) {
|
||||
if (hashedStream.completedHash) {
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
}
|
||||
hashedStream.on('hashed', () => {
|
||||
hashedStream.removeAllListeners('hashed');
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
});
|
||||
return undefined;
|
||||
}
|
||||
return cb(null, dataRetrievalInfo);
|
||||
});
|
||||
},
|
||||
|
||||
head: (objectGetInfo, log, cb) => {
|
||||
if (implName !== 'multipleBackends') {
|
||||
// no-op if not multipleBackend implementation;
|
||||
// head is used during get just to check external backend data state
|
||||
return process.nextTick(cb);
|
||||
}
|
||||
return client.head(objectGetInfo, log.getSerializedUids(), cb);
|
||||
},
|
||||
|
||||
get: (objectGetInfo, response, log, cb) => {
|
||||
const isMdModelVersion2 = typeof(objectGetInfo) === 'string';
|
||||
const isRequiredStringKey = constants.clientsRequireStringKey[implName];
|
||||
const key = isMdModelVersion2 ? objectGetInfo : objectGetInfo.key;
|
||||
const clientGetInfo = isRequiredStringKey ? key : objectGetInfo;
|
||||
const range = objectGetInfo.range;
|
||||
|
||||
// If the key is explicitly set to null, the part to
|
||||
// be read doesn't really exist and is only made of zeroes.
|
||||
// This functionality is used by Scality-NFSD.
|
||||
// Otherwise, the key is always defined
|
||||
assert(key === null || key !== undefined);
|
||||
if (key === null) {
|
||||
cb(null, new NullStream(objectGetInfo.size, range));
|
||||
return;
|
||||
}
|
||||
log.debug('sending get to datastore', { implName,
|
||||
key, range, method: 'get' });
|
||||
// We need to use response as a writable stream for AZURE GET
|
||||
if (!isMdModelVersion2 && !isRequiredStringKey && response) {
|
||||
clientGetInfo.response = response;
|
||||
}
|
||||
client.get(clientGetInfo, range, log.getSerializedUids(),
|
||||
(err, stream) => {
|
||||
if (err) {
|
||||
log.error('get error from datastore',
|
||||
{ error: err, implName });
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
if (objectGetInfo.cipheredDataKey) {
|
||||
const serverSideEncryption = {
|
||||
cryptoScheme: objectGetInfo.cryptoScheme,
|
||||
masterKeyId: objectGetInfo.masterKeyId,
|
||||
cipheredDataKey: Buffer.from(
|
||||
objectGetInfo.cipheredDataKey, 'base64'),
|
||||
};
|
||||
const offset = objectGetInfo.range ?
|
||||
objectGetInfo.range[0] : 0;
|
||||
return kms.createDecipherBundle(
|
||||
serverSideEncryption, offset, log,
|
||||
(err, decipherBundle) => {
|
||||
if (err) {
|
||||
log.error('cannot get decipher bundle ' +
|
||||
'from kms', {
|
||||
method: 'data.wrapper.data.get',
|
||||
});
|
||||
return cb(err);
|
||||
}
|
||||
stream.pipe(decipherBundle.decipher);
|
||||
return cb(null, decipherBundle.decipher);
|
||||
});
|
||||
}
|
||||
return cb(null, stream);
|
||||
});
|
||||
},
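A short sketch of the null-key path described in the comment above (placeholder values; the exact byte-range semantics are those of arsenal's NullStream):

// Hypothetical call: key === null means the part only exists as zeroes
// (the Scality-NFSD case), so no backend is contacted at all.
data.get({ key: null, size: 1024, range: [0, 511] }, null, log,
    (err, stream) => {
        // stream is a NullStream emitting zero bytes for the requested range
    });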
|
||||
|
||||
delete: (objectGetInfo, log, cb) => {
|
||||
const callback = cb || log.end;
|
||||
const isMdModelVersion2 = typeof(objectGetInfo) === 'string';
|
||||
const isRequiredStringKey = constants.clientsRequireStringKey[implName];
|
||||
const key = isMdModelVersion2 ? objectGetInfo : objectGetInfo.key;
|
||||
const clientGetInfo = isRequiredStringKey ? key : objectGetInfo;
|
||||
|
||||
log.trace('sending delete to datastore', {
|
||||
implName, key, method: 'delete' });
|
||||
// If the key is explicitly set to null, the part to
|
||||
// be deleted doesn't really exist.
|
||||
// This functionality is used by Scality-NFSD.
|
||||
// Otherwise, the key is always defined
|
||||
assert(key === null || key !== undefined);
|
||||
if (key === null) {
|
||||
callback(null);
|
||||
return;
|
||||
}
|
||||
_retryDelete(clientGetInfo, log, 0, err => {
|
||||
if (err && !err.ObjNotFound) {
|
||||
log.error('delete error from datastore',
|
||||
{ error: err, key: objectGetInfo.key, moreRetries: 'no' });
|
||||
}
|
||||
return callback(err);
|
||||
});
|
||||
},
|
||||
|
||||
batchDelete: (locations, requestMethod, newObjDataStoreName, log, cb) => {
|
||||
// TODO: The method of persistence of sproxy delete key will
|
||||
// be finalized; refer Issue #312 for the discussion. In the
|
||||
// meantime, we at least log the location of the data we are
|
||||
// about to delete before attempting its deletion.
|
||||
if (_shouldSkipDelete(locations, requestMethod, newObjDataStoreName)) {
|
||||
return process.nextTick(cb);
|
||||
}
|
||||
log.trace('initiating batch delete', {
|
||||
keys: locations,
|
||||
implName,
|
||||
method: 'batchDelete',
|
||||
});
|
||||
const keys = [];
|
||||
let backendName = '';
|
||||
const shouldBatchDelete = locations.every(l => {
|
||||
// legacy sproxyd location, should fallback to using regular delete
|
||||
if (typeof l === 'string') {
|
||||
return false;
|
||||
}
|
||||
const { dataStoreName, key } = l;
|
||||
backendName = dataStoreName;
|
||||
const type = config.getLocationConstraintType(dataStoreName);
|
||||
// filter out possible `null` created by NFS
|
||||
if (key && type === 'scality') {
|
||||
keys.push(key);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
});
|
||||
if (shouldBatchDelete) {
|
||||
return client.batchDelete(backendName, { keys }, log, cb);
|
||||
}
|
||||
return async.eachLimit(locations, 5, (loc, next) => {
|
||||
process.nextTick(() => data.delete(loc, log, next));
|
||||
},
|
||||
err => {
|
||||
if (err) {
|
||||
log.end().error('batch delete failed', { error: err });
|
||||
// deletion of non-existing objects result in 204
|
||||
if (err.code === 404) {
|
||||
return cb();
|
||||
}
|
||||
return cb(err);
|
||||
}
|
||||
log.end().trace('batch delete successfully completed');
|
||||
return cb();
|
||||
});
|
||||
},
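To make the batching rule above concrete, an assumed example of how locations are treated (location shapes and names are illustrative; 'us-east-1' is assumed to be a 'scality'-type location):

// A modern location object on a 'scality'-type backend can be batched.
const batchable = [{ key: '1234', dataStoreName: 'us-east-1' }];
// -> client.batchDelete('us-east-1', { keys: ['1234'] }, log, cb)

// A legacy string location forces the per-key fallback.
const legacy = ['some-legacy-sproxyd-key'];
// -> async.eachLimit(locations, 5, ...) calling data.delete() per key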
|
||||
|
||||
switch: newClient => {
|
||||
client = newClient;
|
||||
return client;
|
||||
},
|
||||
|
||||
checkHealth: (log, cb, flightCheckOnStartUp) => {
|
||||
if (!client.healthcheck) {
|
||||
const defResp = {};
|
||||
defResp[implName] = { code: 200, message: 'OK' };
|
||||
return cb(null, defResp);
|
||||
}
|
||||
return client.healthcheck(flightCheckOnStartUp, log, (err, result) => {
|
||||
let respBody = {};
|
||||
if (err) {
|
||||
log.error(`error from ${implName}`, { error: err });
|
||||
respBody[implName] = {
|
||||
error: err,
|
||||
};
|
||||
// error returned as null so async parallel doesn't return
|
||||
// before all backends are checked
|
||||
return cb(null, respBody);
|
||||
}
|
||||
if (implName === 'multipleBackends') {
|
||||
respBody = result;
|
||||
return cb(null, respBody);
|
||||
}
|
||||
respBody[implName] = {
|
||||
code: result.statusCode,
|
||||
message: result.statusMessage,
|
||||
};
|
||||
return cb(null, respBody);
|
||||
});
|
||||
},
|
||||
|
||||
getDiskUsage: (log, cb) => {
|
||||
if (!client.getDiskUsage) {
|
||||
log.debug('returning empty disk usage as fallback', { implName });
|
||||
return cb(null, {});
|
||||
}
|
||||
return client.getDiskUsage(log.getSerializedUids(), cb);
|
||||
},
|
||||
|
||||
|
||||
/**
|
||||
* _putForCopy - put used for copying object
|
||||
* @param {object} cipherBundle - cipher bundle that encrypt the data
|
||||
* @param {object} stream - stream containing the data
|
||||
* @param {object} part - element of dataLocator array
|
||||
* @param {object} dataStoreContext - information of the
|
||||
* destination object
|
||||
* dataStoreContext.bucketName: destination bucket name,
|
||||
* dataStoreContext.owner: owner,
|
||||
* dataStoreContext.namespace: request namespace,
|
||||
* dataStoreContext.objectKey: destination object key name,
|
||||
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
|
||||
* Represents the info necessary to evaluate which data backend to use
|
||||
* on a data put call.
|
||||
* @param {object} log - Werelogs request logger
|
||||
* @param {function} cb - callback
|
||||
* @returns {function} cb - callback
|
||||
*/
|
||||
_putForCopy: (cipherBundle, stream, part, dataStoreContext,
|
||||
destBackendInfo, log, cb) => data.put(cipherBundle, stream,
|
||||
part.size, dataStoreContext,
|
||||
destBackendInfo, log,
|
||||
(error, partRetrievalInfo) => {
|
||||
if (error) {
|
||||
return cb(error);
|
||||
}
|
||||
const partResult = {
|
||||
key: partRetrievalInfo.key,
|
||||
dataStoreName: partRetrievalInfo
|
||||
.dataStoreName,
|
||||
dataStoreType: partRetrievalInfo
|
||||
.dataStoreType,
|
||||
start: part.start,
|
||||
size: part.size,
|
||||
};
|
||||
if (cipherBundle) {
|
||||
partResult.cryptoScheme = cipherBundle.cryptoScheme;
|
||||
partResult.cipheredDataKey = cipherBundle.cipheredDataKey;
|
||||
}
|
||||
if (part.dataStoreETag) {
|
||||
partResult.dataStoreETag = part.dataStoreETag;
|
||||
}
|
||||
if (partRetrievalInfo.dataStoreVersionId) {
|
||||
partResult.dataStoreVersionId =
|
||||
partRetrievalInfo.dataStoreVersionId;
|
||||
}
|
||||
return cb(null, partResult);
|
||||
}),
|
||||
|
||||
/**
|
||||
* _dataCopyPut - put used for copying object with and without
|
||||
* encryption
|
||||
* @param {string} serverSideEncryption - Server side encryption
|
||||
* @param {object} stream - stream containing the data
|
||||
* @param {object} part - element of dataLocator array
|
||||
* @param {object} dataStoreContext - information of the
|
||||
* destination object
|
||||
* dataStoreContext.bucketName: destination bucket name,
|
||||
* dataStoreContext.owner: owner,
|
||||
* dataStoreContext.namespace: request namespace,
|
||||
* dataStoreContext.objectKey: destination object key name,
|
||||
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
|
||||
* Represents the info necessary to evaluate which data backend to use
|
||||
* on a data put call.
|
||||
* @param {object} log - Werelogs request logger
|
||||
* @param {function} cb - callback
|
||||
* @returns {function} cb - callback
|
||||
*/
|
||||
_dataCopyPut: (serverSideEncryption, stream, part, dataStoreContext,
|
||||
destBackendInfo, log, cb) => {
|
||||
if (serverSideEncryption) {
|
||||
return kms.createCipherBundle(
|
||||
serverSideEncryption,
|
||||
log, (err, cipherBundle) => {
|
||||
if (err) {
|
||||
log.debug('error getting cipherBundle');
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
return data._putForCopy(cipherBundle, stream, part,
|
||||
dataStoreContext, destBackendInfo, log, cb);
|
||||
});
|
||||
}
|
||||
// Copied object is not encrypted so just put it
|
||||
// without a cipherBundle
|
||||
return data._putForCopy(null, stream, part, dataStoreContext,
|
||||
destBackendInfo, log, cb);
|
||||
},
|
||||
|
||||
/**
|
||||
* copyObject - copy object
|
||||
* @param {object} request - request object
|
||||
* @param {string} sourceLocationConstraintName -
|
||||
* source locationContraint name (awsbackend, azurebackend, ...)
|
||||
* @param {object} storeMetadataParams - metadata information of the
|
||||
* source object
|
||||
* @param {array} dataLocator - source object metadata location(s)
|
||||
* NOTE: for Azure and AWS data backend this array only has one item
|
||||
* @param {object} dataStoreContext - information of the
|
||||
* destination object
|
||||
* dataStoreContext.bucketName: destination bucket name,
|
||||
* dataStoreContext.owner: owner,
|
||||
* dataStoreContext.namespace: request namespace,
|
||||
* dataStoreContext.objectKey: destination object key name,
|
||||
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
|
||||
* Represents the info necessary to evaluate which data backend to use
|
||||
* on a data put call.
|
||||
* @param {object} sourceBucketMD - metadata of the source bucket
|
||||
* @param {object} destBucketMD - metadata of the destination bucket
|
||||
* @param {object} serverSideEncryption - server side encryption configuration
|
||||
* @param {object} log - Werelogs request logger
|
||||
* @param {function} cb - callback
|
||||
* @returns {function} cb - callback
|
||||
*/
|
||||
copyObject: (request,
|
||||
sourceLocationConstraintName, storeMetadataParams, dataLocator,
|
||||
dataStoreContext, destBackendInfo, sourceBucketMD, destBucketMD,
|
||||
serverSideEncryption, log, cb) => {
|
||||
if (config.backends.data === 'multiple' &&
|
||||
utils.externalBackendCopy(sourceLocationConstraintName,
|
||||
storeMetadataParams.dataStoreName, sourceBucketMD, destBucketMD)
|
||||
&& serverSideEncryption === null) {
|
||||
const destLocationConstraintName =
|
||||
storeMetadataParams.dataStoreName;
|
||||
const objectGetInfo = dataLocator[0];
|
||||
const externalSourceKey = objectGetInfo.key;
|
||||
return client.copyObject(request, destLocationConstraintName,
|
||||
externalSourceKey, sourceLocationConstraintName,
|
||||
storeMetadataParams, log, (error, objectRetrievalInfo) => {
|
||||
if (error) {
|
||||
return cb(error);
|
||||
}
|
||||
const putResult = {
|
||||
key: objectRetrievalInfo.key,
|
||||
dataStoreName: objectRetrievalInfo.
|
||||
dataStoreName,
|
||||
dataStoreType: objectRetrievalInfo.
|
||||
dataStoreType,
|
||||
dataStoreVersionId:
|
||||
objectRetrievalInfo.dataStoreVersionId,
|
||||
size: storeMetadataParams.size,
|
||||
dataStoreETag: objectGetInfo.dataStoreETag,
|
||||
start: objectGetInfo.start,
|
||||
};
|
||||
const putResultArr = [putResult];
|
||||
return cb(null, putResultArr);
|
||||
});
|
||||
}
|
||||
|
||||
// dataLocator is an array. need to get and put all parts
|
||||
// For now, copy 1 part at a time. Could increase the second
|
||||
// argument here to increase the number of parts
|
||||
// copied at once.
|
||||
return async.mapLimit(dataLocator, 1,
|
||||
// eslint-disable-next-line prefer-arrow-callback
|
||||
function copyPart(part, copyCb) {
|
||||
if (part.dataStoreType === 'azure') {
|
||||
const passThrough = new PassThrough();
|
||||
return async.parallel([
|
||||
parallelCb => data.get(part, passThrough, log, err =>
|
||||
parallelCb(err)),
|
||||
parallelCb => data._dataCopyPut(serverSideEncryption,
|
||||
passThrough,
|
||||
part, dataStoreContext, destBackendInfo, log,
|
||||
parallelCb),
|
||||
], (err, res) => {
|
||||
if (err) {
|
||||
return copyCb(err);
|
||||
}
|
||||
return copyCb(null, res[1]);
|
||||
});
|
||||
}
|
||||
return data.get(part, null, log, (err, stream) => {
|
||||
if (err) {
|
||||
return copyCb(err);
|
||||
}
|
||||
return data._dataCopyPut(serverSideEncryption, stream,
|
||||
part, dataStoreContext, destBackendInfo, log, copyCb);
|
||||
});
|
||||
}, (err, results) => {
|
||||
if (err) {
|
||||
log.debug('error transferring data from source',
|
||||
{ error: err });
|
||||
return cb(err);
|
||||
}
|
||||
return cb(null, results);
|
||||
});
|
||||
},
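Summarised, copyObject above chooses between two paths; a condensed, hypothetical restatement of that branch (simplified guards, names are not part of the module):

// Not the actual implementation, just the shape of the decision.
function pickCopyStrategy(usingMultipleBackends, sameExternalBackend, sse) {
    if (usingMultipleBackends && sameExternalBackend && sse === null) {
        // one call: the external backend copies the object server-side
        return 'client.copyObject';
    }
    // otherwise stream every dataLocator part through CloudServer,
    // one part at a time (async.mapLimit(dataLocator, 1, ...))
    return 'get-then-put-per-part';
}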
|
||||
|
||||
|
||||
_dataCopyPutPart: (request,
|
||||
serverSideEncryption, stream, part,
|
||||
dataStoreContext, destBackendInfo, locations, log, cb) => {
|
||||
const numberPartSize =
|
||||
Number.parseInt(part.size, 10);
|
||||
const partNumber = Number.parseInt(request.query.partNumber, 10);
|
||||
const uploadId = request.query.uploadId;
|
||||
const destObjectKey = request.objectKey;
|
||||
const destBucketName = request.bucketName;
|
||||
const destLocationConstraintName = destBackendInfo
|
||||
.getControllingLocationConstraint();
|
||||
if (externalBackends[config
|
||||
.locationConstraints[destLocationConstraintName]
|
||||
.type]) {
|
||||
return multipleBackendGateway.uploadPart(null, null,
|
||||
stream, numberPartSize,
|
||||
destLocationConstraintName, destObjectKey, uploadId,
|
||||
partNumber, destBucketName, log,
|
||||
(err, partInfo) => {
|
||||
if (err) {
|
||||
log.error('error putting ' +
|
||||
'part to AWS', {
|
||||
error: err,
|
||||
method:
|
||||
'objectPutCopyPart::' +
|
||||
'multipleBackendGateway.' +
|
||||
'uploadPart',
|
||||
});
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
// skip to end of waterfall
|
||||
// because don't need to store
|
||||
// part metadata
|
||||
if (partInfo &&
|
||||
partInfo.dataStoreType === 'aws_s3') {
|
||||
// if data backend handles MPU, skip to end
|
||||
// of waterfall
|
||||
const partResult = {
|
||||
dataStoreETag: partInfo.dataStoreETag,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb(skipError, partInfo.dataStoreETag);
|
||||
} else if (
|
||||
partInfo &&
|
||||
partInfo.dataStoreType === 'azure') {
|
||||
const partResult = {
|
||||
key: partInfo.key,
|
||||
dataStoreName: partInfo.dataStoreName,
|
||||
dataStoreETag: partInfo.dataStoreETag,
|
||||
size: numberPartSize,
|
||||
numberSubParts:
|
||||
partInfo.numberSubParts,
|
||||
partNumber: partInfo.partNumber,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb();
|
||||
}
|
||||
return cb(skipError);
|
||||
});
|
||||
}
|
||||
if (serverSideEncryption) {
|
||||
return kms.createCipherBundle(
|
||||
serverSideEncryption,
|
||||
log, (err, cipherBundle) => {
|
||||
if (err) {
|
||||
log.debug('error getting cipherBundle',
|
||||
{ error: err });
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
return data.put(cipherBundle, stream,
|
||||
numberPartSize, dataStoreContext,
|
||||
destBackendInfo, log,
|
||||
(error, partRetrievalInfo,
|
||||
hashedStream) => {
|
||||
if (error) {
|
||||
log.debug('error putting ' +
|
||||
'encrypted part', { error });
|
||||
return cb(error);
|
||||
}
|
||||
const partResult = {
|
||||
key: partRetrievalInfo.key,
|
||||
dataStoreName: partRetrievalInfo
|
||||
.dataStoreName,
|
||||
dataStoreETag: hashedStream
|
||||
.completedHash,
|
||||
// Do not include part start
|
||||
// here since will change in
|
||||
// final MPU object
|
||||
size: numberPartSize,
|
||||
sseCryptoScheme: cipherBundle
|
||||
.cryptoScheme,
|
||||
sseCipheredDataKey: cipherBundle
|
||||
.cipheredDataKey,
|
||||
sseAlgorithm: cipherBundle
|
||||
.algorithm,
|
||||
sseMasterKeyId: cipherBundle
|
||||
.masterKeyId,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb();
|
||||
});
|
||||
});
|
||||
}
|
||||
// Copied object is not encrypted so just put it
|
||||
// without a cipherBundle
|
||||
return data.put(null, stream, numberPartSize,
|
||||
dataStoreContext, destBackendInfo,
|
||||
log, (error, partRetrievalInfo, hashedStream) => {
|
||||
if (error) {
|
||||
log.debug('error putting object part',
|
||||
{ error });
|
||||
return cb(error);
|
||||
}
|
||||
const partResult = {
|
||||
key: partRetrievalInfo.key,
|
||||
dataStoreName: partRetrievalInfo.dataStoreName,
|
||||
dataStoreETag: hashedStream.completedHash,
|
||||
size: numberPartSize,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb();
|
||||
});
|
||||
},
|
||||
|
||||
/**
|
||||
* uploadPartCopy - put copy part
|
||||
* @param {object} request - request object
|
||||
* @param {object} log - Werelogs request logger
|
||||
* @param {object} destBucketMD - destination bucket metadata
|
||||
* @param {string} sourceLocationConstraintName -
|
||||
* source locationContraint name (awsbackend, azurebackend, ...)
|
||||
* @param {string} destLocationConstraintName -
|
||||
* location of the destination MPU object (awsbackend, azurebackend, ...)
|
||||
* @param {array} dataLocator - source object metadata location(s)
|
||||
* NOTE: for Azure and AWS data backend this array
|
||||
* @param {object} dataStoreContext - information of the
|
||||
* destination object
|
||||
* dataStoreContext.bucketName: destination bucket name,
|
||||
* dataStoreContext.owner: owner,
|
||||
* dataStoreContext.namespace: request namespace,
|
||||
* dataStoreContext.objectKey: destination object key name,
|
||||
* dataStoreContext.uploadId: uploadId
|
||||
* dataStoreContext.partNumber: request.query.partNumber
|
||||
* @param {function} callback - callback
|
||||
* @returns {function} cb - callback
|
||||
*/
|
||||
uploadPartCopy: (request, log, destBucketMD, sourceLocationConstraintName,
|
||||
destLocationConstraintName, dataLocator, dataStoreContext,
|
||||
callback) => {
|
||||
const serverSideEncryption = destBucketMD.getServerSideEncryption();
|
||||
const lastModified = new Date().toJSON();
|
||||
|
||||
// skip if 0 byte object
|
||||
if (dataLocator.length === 0) {
|
||||
return process.nextTick(() => {
|
||||
callback(null, constants.emptyFileMd5,
|
||||
lastModified, serverSideEncryption, []);
|
||||
});
|
||||
}
|
||||
|
||||
// if destination mpu was initiated in legacy version
|
||||
if (destLocationConstraintName === undefined) {
|
||||
const backendInfoObj = locationConstraintCheck(request,
|
||||
null, destBucketMD, log);
|
||||
if (backendInfoObj.err) {
|
||||
return process.nextTick(() => {
|
||||
callback(backendInfoObj.err);
|
||||
});
|
||||
}
|
||||
// eslint-disable-next-line no-param-reassign
|
||||
destLocationConstraintName = backendInfoObj.controllingLC;
|
||||
}
|
||||
|
||||
const locationTypeMatchAWS =
|
||||
config.backends.data === 'multiple' &&
|
||||
config.getLocationConstraintType(sourceLocationConstraintName) ===
|
||||
config.getLocationConstraintType(destLocationConstraintName) &&
|
||||
config.getLocationConstraintType(sourceLocationConstraintName) ===
|
||||
'aws_s3';
|
||||
|
||||
// NOTE: using multipleBackendGateway.uploadPartCopy only if copying
|
||||
// from AWS to AWS
|
||||
|
||||
if (locationTypeMatchAWS && dataLocator.length === 1) {
|
||||
const awsSourceKey = dataLocator[0].key;
|
||||
return multipleBackendGateway.uploadPartCopy(request,
|
||||
destLocationConstraintName, awsSourceKey,
|
||||
sourceLocationConstraintName, log, (error, eTag) => {
|
||||
if (error) {
|
||||
return callback(error);
|
||||
}
|
||||
return callback(skipError, eTag,
|
||||
lastModified, serverSideEncryption);
|
||||
});
|
||||
}
|
||||
|
||||
const backendInfo = new BackendInfo(destLocationConstraintName);
|
||||
|
||||
// totalHash will be sent through the RelayMD5Sum transform streams
|
||||
// to collect the md5 from multiple streams
|
||||
let totalHash;
|
||||
const locations = [];
|
||||
// dataLocator is an array. need to get and put all parts
|
||||
// in order so can get the ETag of full object
|
||||
return async.forEachOfSeries(dataLocator,
|
||||
// eslint-disable-next-line prefer-arrow-callback
|
||||
function copyPart(part, index, cb) {
|
||||
if (part.dataStoreType === 'azure') {
|
||||
const passThrough = new PassThrough();
|
||||
return async.parallel([
|
||||
next => data.get(part, passThrough, log, err => {
|
||||
if (err) {
|
||||
log.error('error getting data part ' +
|
||||
'from Azure', {
|
||||
error: err,
|
||||
method:
|
||||
'objectPutCopyPart::' +
|
||||
'multipleBackendGateway.' +
|
||||
'copyPart',
|
||||
});
|
||||
return next(err);
|
||||
}
|
||||
return next();
|
||||
}),
|
||||
next => data._dataCopyPutPart(request,
|
||||
serverSideEncryption, passThrough, part,
|
||||
dataStoreContext, backendInfo, locations, log, next),
|
||||
], err => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
return cb();
|
||||
});
|
||||
}
|
||||
return data.get(part, null, log, (err, stream) => {
|
||||
if (err) {
|
||||
log.debug('error getting object part',
|
||||
{ error: err });
|
||||
return cb(err);
|
||||
}
|
||||
const hashedStream =
|
||||
new RelayMD5Sum(totalHash, updatedHash => {
|
||||
totalHash = updatedHash;
|
||||
});
|
||||
stream.pipe(hashedStream);
|
||||
|
||||
// destLocationConstraintName is location of the
|
||||
// destination MPU object
|
||||
return data._dataCopyPutPart(request,
|
||||
serverSideEncryption, hashedStream, part,
|
||||
dataStoreContext, backendInfo, locations, log, cb);
|
||||
});
|
||||
}, err => {
|
||||
// Digest the final combination of all of the part streams
|
||||
if (err && err !== skipError) {
|
||||
log.debug('error transferring data from source',
|
||||
{ error: err, method: 'goGetData' });
|
||||
return callback(err);
|
||||
}
|
||||
if (totalHash) {
|
||||
totalHash = totalHash.digest('hex');
|
||||
} else {
|
||||
totalHash = locations[0].dataStoreETag;
|
||||
}
|
||||
if (err && err === skipError) {
|
||||
return callback(skipError, totalHash,
|
||||
lastModified, serverSideEncryption);
|
||||
}
|
||||
return callback(null, totalHash,
|
||||
lastModified, serverSideEncryption, locations);
|
||||
});
|
||||
},
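The running-hash idea used above, where RelayMD5Sum threads totalHash from one part stream to the next, can be sketched with plain Node crypto; this is a simplified stand-in, not the RelayMD5Sum implementation:

const crypto = require('crypto');

// Feed several part buffers through one MD5 context, mirroring how
// totalHash accumulates across parts before the final digest('hex').
function md5OfParts(parts) {
    const hash = crypto.createHash('md5');
    parts.forEach(part => hash.update(part));
    return hash.digest('hex');
}

// md5OfParts([Buffer.from('part1'), Buffer.from('part2')])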
|
||||
};
|
||||
|
||||
module.exports = data;
|
||||
module.exports = { data, client, implName };
|
||||
|
|
|
@ -57,7 +57,7 @@ class Common {
|
|||
return newIV;
|
||||
}
|
||||
|
||||
/**
|
||||
/**
|
||||
* Derive key to use in cipher
|
||||
* @param {number} cryptoScheme - cryptoScheme being used
|
||||
* @param {buffer} dataKey - the unencrypted key (either from the
|
||||
|
@ -84,7 +84,7 @@ class Common {
|
|||
this._keySize(), 'sha1', (err, derivedKey) => {
|
||||
if (err) {
|
||||
log.error('pbkdf2 function failed on key derivation',
|
||||
{ error: err });
|
||||
{ error: err });
|
||||
cb(errors.InternalError);
|
||||
return;
|
||||
}
|
||||
|
@ -107,7 +107,7 @@ class Common {
|
|||
return cb(errors.InternalError);
|
||||
}
|
||||
|
||||
/**
|
||||
/**
|
||||
* createDecipher
|
||||
* @param {number} cryptoScheme - cryptoScheme being used
|
||||
* @param {buffer} dataKey - the unencrypted key (either from the
|
||||
|
@ -131,7 +131,7 @@ class Common {
|
|||
const toSkip = offset % aesBlockSize;
|
||||
const iv = this._incrementIV(derivedIV, blocks);
|
||||
const cipher = crypto.createDecipheriv(this._algorithm(),
|
||||
derivedKey, iv);
|
||||
derivedKey, iv);
|
||||
if (toSkip) {
|
||||
/* Above, we advanced to the latest boundary not
|
||||
greater than the offset amount. Here we advance by
|
||||
|
|
|
@ -40,7 +40,7 @@ const backend = {
|
|||
});
|
||||
},
|
||||
|
||||
/**
|
||||
/**
|
||||
*
|
||||
* @param {number} cryptoScheme - crypto scheme version number
|
||||
* @param {string} masterKeyId - master key; for the file backend
|
||||
|
@ -53,10 +53,10 @@ const backend = {
|
|||
* @callback called with (err, cipheredDataKey: Buffer)
|
||||
*/
|
||||
cipherDataKey: function cipherDataKeyMem(cryptoScheme,
|
||||
masterKeyId,
|
||||
plainTextDataKey,
|
||||
log,
|
||||
cb) {
|
||||
masterKeyId,
|
||||
plainTextDataKey,
|
||||
log,
|
||||
cb) {
|
||||
process.nextTick(() => {
|
||||
const masterKey = Buffer.from(masterKeyId, 'hex');
|
||||
Common.createCipher(
|
||||
|
@ -81,7 +81,7 @@ const backend = {
|
|||
});
|
||||
},
|
||||
|
||||
/**
|
||||
/**
|
||||
*
|
||||
* @param {number} cryptoScheme - crypto scheme version number
|
||||
* @param {string} masterKeyId - master key; for the file backend
|
||||
|
@ -94,10 +94,10 @@ const backend = {
|
|||
* @callback called with (err, plainTextDataKey: Buffer)
|
||||
*/
|
||||
decipherDataKey: function decipherDataKeyMem(cryptoScheme,
|
||||
masterKeyId,
|
||||
cipheredDataKey,
|
||||
log,
|
||||
cb) {
|
||||
masterKeyId,
|
||||
cipheredDataKey,
|
||||
log,
|
||||
cb) {
|
||||
process.nextTick(() => {
|
||||
const masterKey = Buffer.from(masterKeyId, 'hex');
|
||||
Common.createDecipher(
|
||||
|
|
|
@ -8,7 +8,7 @@ const backend = {
|
|||
* Target implementation will be async. let's mimic it
|
||||
*/
|
||||
|
||||
/**
|
||||
/**
|
||||
*
|
||||
* @param {string} bucketName - bucket name
|
||||
* @param {object} log - logger object
|
||||
|
@ -40,7 +40,7 @@ const backend = {
|
|||
});
|
||||
},
|
||||
|
||||
/**
|
||||
/**
|
||||
*
|
||||
* @param {number} cryptoScheme - crypto scheme version number
|
||||
* @param {string} masterKeyId - key to retrieve master key
|
||||
|
@ -51,10 +51,10 @@ const backend = {
|
|||
* @callback called with (err, cipheredDataKey: Buffer)
|
||||
*/
|
||||
cipherDataKey: function cipherDataKeyMem(cryptoScheme,
|
||||
masterKeyId,
|
||||
plainTextDataKey,
|
||||
log,
|
||||
cb) {
|
||||
masterKeyId,
|
||||
plainTextDataKey,
|
||||
log,
|
||||
cb) {
|
||||
process.nextTick(() => {
|
||||
Common.createCipher(
|
||||
cryptoScheme, kms[masterKeyId], 0, log,
|
||||
|
@ -78,7 +78,7 @@ const backend = {
|
|||
});
|
||||
},
|
||||
|
||||
/**
|
||||
/**
|
||||
*
|
||||
* @param {number} cryptoScheme - crypto scheme version number
|
||||
* @param {string} masterKeyId - key to retrieve master key
|
||||
|
@ -89,10 +89,10 @@ const backend = {
|
|||
* @callback called with (err, plainTextDataKey: Buffer)
|
||||
*/
|
||||
decipherDataKey: function decipherDataKeyMem(cryptoScheme,
|
||||
masterKeyId,
|
||||
cipheredDataKey,
|
||||
log,
|
||||
cb) {
|
||||
masterKeyId,
|
||||
cipheredDataKey,
|
||||
log,
|
||||
cb) {
|
||||
process.nextTick(() => {
|
||||
Common.createDecipher(
|
||||
cryptoScheme, kms[masterKeyId], 0, log,
|
||||
|
|
|
@ -6,12 +6,12 @@ const https = require('https');
|
|||
const logger = require('../utilities/logger');
|
||||
|
||||
function _createEncryptedBucket(host,
|
||||
port,
|
||||
bucketName,
|
||||
accessKey,
|
||||
secretKey,
|
||||
verbose, ssl,
|
||||
locationConstraint) {
|
||||
port,
|
||||
bucketName,
|
||||
accessKey,
|
||||
secretKey,
|
||||
verbose, ssl,
|
||||
locationConstraint) {
|
||||
const options = {
|
||||
host,
|
||||
port,
|
||||
|
@ -82,11 +82,11 @@ function createEncryptedBucket() {
|
|||
.option('-s, --ssl', 'Enable ssl')
|
||||
.option('-v, --verbose')
|
||||
.option('-l, --location-constraint <locationConstraint>',
|
||||
'location Constraint')
|
||||
'location Constraint')
|
||||
.parse(process.argv);
|
||||
|
||||
const { host, port, accessKey, secretKey, bucket, verbose, ssl,
|
||||
locationConstraint } = commander;
|
||||
locationConstraint } = commander;
|
||||
|
||||
if (!host || !port || !accessKey || !secretKey || !bucket) {
|
||||
logger.error('missing parameter');
|
||||
|
|
|
@ -11,14 +11,13 @@ const Common = require('./common');
|
|||
let scalityKMS;
|
||||
let scalityKMSImpl;
|
||||
try {
|
||||
// eslint-disable-next-line import/no-unresolved
|
||||
const ScalityKMS = require('scality-kms');
|
||||
scalityKMS = new ScalityKMS(config.kms);
|
||||
scalityKMSImpl = 'scalityKms';
|
||||
} catch (error) {
|
||||
logger.warn('scality kms unavailable. ' +
|
||||
'Using file kms backend unless mem specified.',
|
||||
{ error });
|
||||
{ error });
|
||||
scalityKMS = file;
|
||||
scalityKMSImpl = 'fileKms';
|
||||
}
|
||||
|
@ -47,7 +46,7 @@ if (config.backends.kms === 'mem') {
|
|||
}
|
||||
|
||||
class KMS {
|
||||
/**
|
||||
/**
|
||||
*
|
||||
* @param {string} bucketName - bucket name
|
||||
* @param {object} log - logger object
|
||||
|
@ -67,7 +66,7 @@ class KMS {
|
|||
});
|
||||
}
|
||||
|
||||
/**
|
||||
/**
|
||||
*
|
||||
* @param {string} bucketName - bucket name
|
||||
* @param {object} sseConfig - SSE configuration
|
||||
|
@ -105,7 +104,7 @@ class KMS {
|
|||
return cb(null, serverSideEncryptionInfo);
|
||||
});
|
||||
}
|
||||
/*
|
||||
/*
|
||||
* no encryption
|
||||
*/
|
||||
return cb(null, null);
|
||||
|
@ -144,7 +143,7 @@ class KMS {
|
|||
}
|
||||
|
||||
|
||||
/**
|
||||
/**
|
||||
* createCipherBundle
|
||||
* @param {object} serverSideEncryptionInfo - info for encryption
|
||||
* @param {number} serverSideEncryptionInfo.cryptoScheme -
|
||||
|
@ -161,7 +160,7 @@ class KMS {
|
|||
* @callback called with (err, cipherBundle)
|
||||
*/
|
||||
static createCipherBundle(serverSideEncryptionInfo,
|
||||
log, cb) {
|
||||
log, cb) {
|
||||
const dataKey = this.createDataKey(log);
|
||||
|
||||
const { algorithm, configuredMasterKeyId, masterKeyId: bucketMasterKeyId } = serverSideEncryptionInfo;
|
||||
|
@ -204,7 +203,7 @@ class KMS {
|
|||
dataKey.fill(0);
|
||||
if (err) {
|
||||
log.debug('error from kms',
|
||||
{ implName, error: err });
|
||||
{ implName, error: err });
|
||||
return next(err);
|
||||
}
|
||||
log.trace('cipher created by the kms');
|
||||
|
@ -218,13 +217,13 @@ class KMS {
|
|||
], (err, cipherBundle) => {
|
||||
if (err) {
|
||||
log.error('error processing cipher bundle',
|
||||
{ implName, error: err });
|
||||
{ implName, error: err });
|
||||
}
|
||||
return cb(err, cipherBundle);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
/**
|
||||
* createDecipherBundle
|
||||
* @param {object} serverSideEncryptionInfo - info for decryption
|
||||
* @param {number} serverSideEncryptionInfo.cryptoScheme -
|
||||
|
@ -244,7 +243,7 @@ class KMS {
|
|||
* @callback called with (err, decipherBundle)
|
||||
*/
|
||||
static createDecipherBundle(serverSideEncryptionInfo, offset,
|
||||
log, cb) {
|
||||
log, cb) {
|
||||
if (!serverSideEncryptionInfo.masterKeyId ||
|
||||
!serverSideEncryptionInfo.cipheredDataKey ||
|
||||
!serverSideEncryptionInfo.cryptoScheme) {
|
||||
|
@ -265,7 +264,7 @@ class KMS {
|
|||
log.debug('deciphering a data key');
|
||||
if (err) {
|
||||
log.debug('error from kms',
|
||||
{ implName, error: err });
|
||||
{ implName, error: err });
|
||||
return next(err);
|
||||
}
|
||||
log.trace('data key deciphered by the kms');
|
||||
|
@ -279,7 +278,7 @@ class KMS {
|
|||
plainTextDataKey.fill(0);
|
||||
if (err) {
|
||||
log.debug('error from kms',
|
||||
{ implName, error: err });
|
||||
{ implName, error: err });
|
||||
return next(err);
|
||||
}
|
||||
log.trace('decipher created by the kms');
|
||||
|
@ -293,7 +292,7 @@ class KMS {
|
|||
], (err, decipherBundle) => {
|
||||
if (err) {
|
||||
log.error('error processing decipher bundle',
|
||||
{ implName, error: err });
|
||||
{ implName, error: err });
|
||||
return cb(err);
|
||||
}
|
||||
return cb(err, decipherBundle);
|
||||
|
|
|
@ -68,26 +68,26 @@ const acl = {
|
|||
let grantWriteHeader = [];
|
||||
if (resourceType === 'bucket') {
|
||||
grantWriteHeader = aclUtils
|
||||
.parseGrant(headers['x-amz-grant-write'], 'WRITE');
|
||||
.parseGrant(headers['x-amz-grant-write'], 'WRITE');
|
||||
}
|
||||
const grantReadACPHeader = aclUtils
|
||||
.parseGrant(headers['x-amz-grant-read-acp'], 'READ_ACP');
|
||||
.parseGrant(headers['x-amz-grant-read-acp'], 'READ_ACP');
|
||||
const grantWriteACPHeader = aclUtils
|
||||
.parseGrant(headers['x-amz-grant-write-acp'], 'WRITE_ACP');
|
||||
.parseGrant(headers['x-amz-grant-write-acp'], 'WRITE_ACP');
|
||||
const grantFullControlHeader = aclUtils
|
||||
.parseGrant(headers['x-amz-grant-full-control'], 'FULL_CONTROL');
|
||||
.parseGrant(headers['x-amz-grant-full-control'], 'FULL_CONTROL');
|
||||
const allGrantHeaders =
|
||||
[].concat(grantReadHeader, grantWriteHeader,
|
||||
grantReadACPHeader, grantWriteACPHeader,
|
||||
grantFullControlHeader).filter(item => item !== undefined);
|
||||
grantReadACPHeader, grantWriteACPHeader,
|
||||
grantFullControlHeader).filter(item => item !== undefined);
|
||||
if (allGrantHeaders.length === 0) {
|
||||
return cb(null, currentResourceACL);
|
||||
}
|
||||
|
||||
const usersIdentifiedByEmail = allGrantHeaders
|
||||
.filter(it => it && it.userIDType.toLowerCase() === 'emailaddress');
|
||||
.filter(it => it && it.userIDType.toLowerCase() === 'emailaddress');
|
||||
const usersIdentifiedByGroup = allGrantHeaders
|
||||
.filter(item => item && item.userIDType.toLowerCase() === 'uri');
|
||||
.filter(item => item && item.userIDType.toLowerCase() === 'uri');
|
||||
const justEmails = usersIdentifiedByEmail.map(item => item.identifier);
|
||||
const validGroups = [
|
||||
constants.allAuthedUsersId,
|
||||
|
@ -101,7 +101,7 @@ const acl = {
|
|||
}
|
||||
}
|
||||
const usersIdentifiedByID = allGrantHeaders
|
||||
.filter(item => item && item.userIDType.toLowerCase() === 'id');
|
||||
.filter(item => item && item.userIDType.toLowerCase() === 'id');
|
||||
// TODO: Consider whether want to verify with Vault
|
||||
// whether canonicalID is associated with existing
|
||||
// account before adding to ACL
|
||||
|
@ -128,7 +128,7 @@ const acl = {
|
|||
// If don't have to look up canonicalID's just sort grants
|
||||
// and add to bucket
|
||||
const revisedACL = aclUtils
|
||||
.sortHeaderGrants(allGrantHeaders, resourceACL);
|
||||
.sortHeaderGrants(allGrantHeaders, resourceACL);
|
||||
return cb(null, revisedACL);
|
||||
}
|
||||
return undefined;
|
||||
|
|
|
@ -8,7 +8,7 @@ const { config } = require('../../Config');
|
|||
class BucketClientInterface {
|
||||
constructor() {
|
||||
assert(config.bucketd.bootstrap.length > 0,
|
||||
'bucketd bootstrap list is empty');
|
||||
'bucketd bootstrap list is empty');
|
||||
const { bootstrap, log } = config.bucketd;
|
||||
if (config.https) {
|
||||
const { key, cert, ca } = config.https;
|
||||
|
@ -31,7 +31,7 @@ class BucketClientInterface {
|
|||
|
||||
createBucket(bucketName, bucketMD, log, cb) {
|
||||
this.client.createBucket(bucketName, log.getSerializedUids(),
|
||||
bucketMD.serialize(), cb);
|
||||
bucketMD.serialize(), cb);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@ -59,17 +59,17 @@ class BucketClientInterface {
|
|||
|
||||
getRaftBuckets(raftId, log, cb) {
|
||||
return this.client.getRaftBuckets(raftId, log.getSerializedUids(),
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
return cb(null, JSON.parse(data));
|
||||
});
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
return cb(null, JSON.parse(data));
|
||||
});
|
||||
}
|
||||
|
||||
putBucketAttributes(bucketName, bucketMD, log, cb) {
|
||||
this.client.putBucketAttributes(bucketName, log.getSerializedUids(),
|
||||
bucketMD.serialize(), cb);
|
||||
bucketMD.serialize(), cb);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@ -97,7 +97,7 @@ class BucketClientInterface {
|
|||
|
||||
deleteObject(bucketName, objName, params, log, cb) {
|
||||
this.client.deleteObject(bucketName, objName, log.getSerializedUids(),
|
||||
cb, params);
|
||||
cb, params);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@ -185,8 +185,8 @@ class BucketClientInterface {
|
|||
reason.msg = undefined;
|
||||
respBody[implName] = {
|
||||
code: 200,
|
||||
message, // Provide interpreted reason msg
|
||||
body: reason, // Provide analysis data
|
||||
message, // Provide interpreted reason msg
|
||||
body: reason, // Provide analysis data
|
||||
};
|
||||
if (failure) {
|
||||
// Setting the `error` field is how the healthCheck
|
||||
|
|
|
@@ -13,7 +13,6 @@ const versionSep = arsenal.versioning.VersioningConstants.VersionId.Separator;
const METASTORE = '__metastore';

class BucketFileInterface {

/**
 * @constructor
 * @param {object} [params] - constructor params
@@ -54,7 +53,7 @@ class BucketFileInterface {
if (err) {
this.logger.fatal('error writing usersBucket ' +
'attributes to metadata',
{ error: err });
{ error: err });
throw (errors.InternalError);
}
});
@@ -84,15 +83,15 @@ class BucketFileInterface {

createBucket(bucketName, bucketMD, log, cb) {
this.getBucketAttributes(bucketName, log, err => {
if (err && err !== errors.NoSuchBucket) {
if (err && !err.is.NoSuchBucket) {
return cb(err);
}
if (err === undefined) {
return cb(errors.BucketAlreadyExists);
}
this.putBucketAttributes(bucketName,
bucketMD,
log, cb);
bucketMD,
log, cb);
return undefined;
});
}
@@ -178,7 +177,7 @@ class BucketFileInterface {
errorStack: err.stack,
};
log.error('error deleting bucket',
logObj);
logObj);
return cb(errors.InternalError);
}
return cb();

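The BucketFileInterface hunk above switches from comparing the callback error against the shared errors.NoSuchBucket object to reading the per-instance flag err.is.NoSuchBucket. A minimal sketch of that new check follows; the backend parameter and its getBucketAttributes/putBucketAttributes methods mirror the interface shown in the hunk and are otherwise assumed, not taken verbatim from the file.

// Minimal sketch (not the file's full method): create a bucket only when
// the prior lookup failed specifically with NoSuchBucket.
const { errors } = require('arsenal');

function createBucketIfMissing(backend, bucketName, bucketMD, log, cb) {
    backend.getBucketAttributes(bucketName, log, err => {
        // Newer arsenal errors expose per-code boolean flags under `is`,
        // so identity comparison against errors.NoSuchBucket is gone.
        if (err && !err.is.NoSuchBucket) {
            return cb(err);                        // unexpected failure
        }
        if (err === undefined) {
            return cb(errors.BucketAlreadyExists); // lookup succeeded
        }
        return backend.putBucketAttributes(bucketName, bucketMD, log, cb);
    });
}
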
@@ -146,7 +146,7 @@ const metastore = {
return cb(null, {
bucket: bucket.serialize(),
obj: JSON.stringify(
metadata.keyMaps.get(bucketName).get(objName)
metadata.keyMaps.get(bucketName).get(objName),
),
});
});
@@ -184,7 +184,7 @@ const metastore = {
if (params && params.versionId) {
const baseKey = inc(formatVersionKey(objName, ''));
const vobjName = formatVersionKey(objName,
params.versionId);
params.versionId);
metadata.keyMaps.get(bucketName).delete(vobjName);
const mst = metadata.keyMaps.get(bucketName).get(objName);
if (mst.versionId === params.versionId) {

@@ -151,7 +151,7 @@ function metadataGetObject(bucketName, objectKey, versionId, log, cb) {
(err, objMD) => {
if (err) {
log.debug('err getting object MD from metadata',
{ error: err });
{ error: err });
return cb(err);
}
if (versionId === 'null') {
@@ -190,7 +190,7 @@ function metadataValidateBucketAndObj(params, log, callback) {
return next(errors.MethodNotAllowed, bucket);
}
if (!isBucketAuthorized(bucket, (preciseRequestType || requestType), canonicalID,
authInfo, log, request)) {
authInfo, log, request)) {
log.debug('access denied for user on bucket', { requestType });
return next(errors.AccessDenied, bucket);
}

@@ -93,24 +93,24 @@ const metadata = {
const value = typeof objVal.getValue === 'function' ?
objVal.getValue() : objVal;
client.putObject(bucketName, objName, value, params, log,
(err, data) => {
if (err) {
log.debug('error from metadata', { implName, error: err });
return cb(err);
}
if (data) {
log.debug('object version successfully put in metadata',
{ version: data });
} else {
log.debug('object successfully put in metadata');
}
return cb(err, data);
});
(err, data) => {
if (err) {
log.debug('error from metadata', { implName, error: err });
return cb(err);
}
if (data) {
log.debug('object version successfully put in metadata',
{ version: data });
} else {
log.debug('object successfully put in metadata');
}
return cb(err, data);
});
},

getBucketAndObjectMD: (bucketName, objName, params, log, cb) => {
log.debug('getting bucket and object from metadata',
{ database: bucketName, object: objName });
{ database: bucketName, object: objName });
client.getBucketAndObject(bucketName, objName, params, log,
(err, data) => {
if (err) {
@@ -118,7 +118,7 @@ const metadata = {
return cb(err);
}
log.debug('bucket and object retrieved from metadata',
{ database: bucketName, object: objName });
{ database: bucketName, object: objName });
return cb(err, data);
});
},

@@ -11,7 +11,7 @@ const locationConstraintCheck = require(
'../api/apiUtils/object/locationConstraintCheck');
const { dataStore } = require('../api/apiUtils/object/storeObject');
const prepareRequestContexts = require(
'../api/apiUtils/authorization/prepareRequestContexts');
'../api/apiUtils/authorization/prepareRequestContexts');
const { decodeVersionId } = require('../api/apiUtils/object/versioning');
const locationKeysHaveChanged
= require('../api/apiUtils/object/locationKeysHaveChanged');
@@ -81,7 +81,7 @@ function _getRequestPayload(req, cb) {
payload.push(chunk);
payloadLen += chunk.length;
}).on('error', cb)
.on('end', () => cb(null, Buffer.concat(payload, payloadLen).toString()));
.on('end', () => cb(null, Buffer.concat(payload, payloadLen).toString()));
}

function _checkMultipleBackendRequest(request, log) {
@@ -207,19 +207,19 @@ function handleTaggingOperation(request, response, type, dataStoreVersionId,
}
}
return multipleBackendGateway.objectTagging(type, request.objectKey,
request.bucketName, objectMD, log, err => {
if (err) {
log.error(`error during object tagging: ${type}`, {
error: err,
method: 'handleTaggingOperation',
});
return callback(err);
}
const dataRetrievalInfo = {
versionId: dataStoreVersionId,
};
return _respond(response, dataRetrievalInfo, log, callback);
});
request.bucketName, objectMD, log, err => {
if (err) {
log.error(`error during object tagging: ${type}`, {
error: err,
method: 'handleTaggingOperation',
});
return callback(err);
}
const dataRetrievalInfo = {
versionId: dataStoreVersionId,
};
return _respond(response, dataRetrievalInfo, log, callback);
});
}

/*
@@ -386,26 +386,26 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) {
objectKey,
});
async.eachLimit(objMd.location, 5,
(loc, next) => data.delete(loc, log, err => {
if (err) {
log.warn('error removing old data location key', {
(loc, next) => data.delete(loc, log, err => {
if (err) {
log.warn('error removing old data location key', {
bucketName,
objectKey,
locationKey: loc,
error: err.message,
});
}
// do not forward the error to let other
// locations be deleted
next();
}),
() => {
log.debug('done removing old data locations', {
method: 'putMetadata',
bucketName,
objectKey,
locationKey: loc,
error: err.message,
});
}
// do not forward the error to let other
// locations be deleted
next();
}),
() => {
log.debug('done removing old data locations', {
method: 'putMetadata',
bucketName,
objectKey,
});
});
}
return _respond(response, md, log, callback);
});
@@ -719,7 +719,7 @@ function batchDelete(request, response, log, callback) {
log.trace('batch delete locations', { locations });
return async.eachLimit(locations, 5, (loc, next) => {
data.delete(loc, log, err => {
if (err && err.ObjNotFound) {
if (err?.is.ObjNotFound) {
log.info('batch delete: data location do not exist', {
method: 'batchDelete',
location: loc,
@@ -787,7 +787,7 @@ function routeBackbeat(clientIP, request, response, log) {
(backbeatRoutes[request.method] === undefined ||
backbeatRoutes[request.method][request.resourceType] === undefined ||
(backbeatRoutes[request.method][request.resourceType]
[request.query.operation] === undefined &&
[request.query.operation] === undefined &&
useMultipleBackend));
log.addDefaultFields({
bucketName: request.bucketName,
@@ -835,49 +835,49 @@ function routeBackbeat(clientIP, request, response, log) {
}
return next(err, userInfo);
}, 's3', requestContexts),
(userInfo, next) => {
if (useMultipleBackend) {
// Bucket and object do not exist in metadata.
return next(null, null, null);
}
const mdValParams = {
(userInfo, next) => {
if (useMultipleBackend) {
// Bucket and object do not exist in metadata.
return next(null, null, null);
}
const mdValParams = {
bucketName: request.bucketName,
objectKey: request.objectKey,
authInfo: userInfo,
versionId,
requestType: 'ReplicateObject',
request,
};
return metadataValidateBucketAndObj(mdValParams, log, next);
},
(bucketInfo, objMd, next) => {
if (useMultipleBackend) {
return backbeatRoutes[request.method][request.resourceType]
[request.query.operation](request, response, log, next);
}
const versioningConfig = bucketInfo.getVersioningConfiguration();
if (!versioningConfig || versioningConfig.Status !== 'Enabled') {
log.debug('bucket versioning is not enabled', {
method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey,
authInfo: userInfo,
versionId,
requestType: 'ReplicateObject',
request,
};
return metadataValidateBucketAndObj(mdValParams, log, next);
},
(bucketInfo, objMd, next) => {
if (useMultipleBackend) {
return backbeatRoutes[request.method][request.resourceType]
[request.query.operation](request, response, log, next);
}
const versioningConfig = bucketInfo.getVersioningConfiguration();
if (!versioningConfig || versioningConfig.Status !== 'Enabled') {
log.debug('bucket versioning is not enabled', {
method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey,
resourceType: request.resourceType,
});
return next(errors.InvalidBucketState);
}
return backbeatRoutes[request.method][request.resourceType](
request, response, bucketInfo, objMd, log, next);
}],
err => {
if (err) {
return responseJSONBody(err, null, response, log);
}
log.debug('backbeat route response sent successfully',
{ method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey });
return undefined;
});
resourceType: request.resourceType,
});
return next(errors.InvalidBucketState);
}
return backbeatRoutes[request.method][request.resourceType](
request, response, bucketInfo, objMd, log, next);
}],
err => {
if (err) {
return responseJSONBody(err, null, response, log);
}
log.debug('backbeat route response sent successfully',
{ method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey });
return undefined;
});
}

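The putMetadata hunk above walks the object's old data locations with async.eachLimit at a concurrency of 5 and deliberately swallows per-location delete errors so the remaining locations are still attempted. A small self-contained sketch of that pattern follows; deleteOldLocations and deleteOne are illustrative names standing in for the route's data.delete call, not identifiers from the file.

// Sketch: bounded-concurrency cleanup that never fails the whole batch.
const async = require('async');

function deleteOldLocations(locations, deleteOne, done) {
    async.eachLimit(locations, 5,
        (loc, next) => deleteOne(loc, err => {
            if (err) {
                console.warn('error removing old data location key',
                    { locationKey: loc, error: err.message });
            }
            // do not forward the error so the other locations are still deleted
            next();
        }),
        done);
}

// Usage with a dummy delete standing in for data.delete(loc, log, cb):
deleteOldLocations(['key-1', 'key-2', 'key-3'],
    (loc, cb) => setImmediate(cb),
    () => console.log('done removing old data locations'));
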
@@ -85,22 +85,22 @@ function routeMetadata(clientIP, request, response, log) {
return metadataProxy.web(request, response, { target }, err => {
if (err) {
log.error('error proxying request to metadata admin server',
{ error: err.message });
{ error: err.message });
return next(errors.ServiceUnavailable);
}
return next();
});
}],
err => {
if (err) {
return responseJSONBody(err, null, response, log);
}
log.debug('metadata route response sent successfully',
{ method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey });
return undefined;
});
err => {
if (err) {
return responseJSONBody(err, null, response, log);
}
log.debug('metadata route response sent successfully',
{ method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey });
return undefined;
});
}

@@ -16,7 +16,7 @@ function getMetricToPush(prevObjectMD, newObjectMD) {
assert.deepStrictEqual(prevObjectMD.getAcl(), newObjectMD.getAcl());
assert.deepStrictEqual(
prevObjectMD.getTags(),
newObjectMD.getTags()
newObjectMD.getTags(),
);
} catch (e) {
return 'replicateTags';

@@ -10,10 +10,18 @@ const { clientCheck } = require('./utilities/healthcheckHandler');
const _config = require('./Config').config;
const { blacklistedPrefixes } = require('../constants');
const api = require('./api/api');
const data = require('./data/wrapper');
const dataWrapper = require('./data/wrapper');
const kms = require('./kms/wrapper');
const locationStorageCheck =
require('./api/apiUtils/object/locationStorageCheck');
const vault = require('./auth/vault');
const metadata = require('./metadata/wrapper');

const routes = arsenal.s3routes.routes;
const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
const websiteEndpoints = _config.websiteEndpoints;
let client = dataWrapper.client;
const implName = dataWrapper.implName;

let allEndpoints;
function updateAllEndpoints() {
@@ -21,6 +29,13 @@ function updateAllEndpoints() {
}
_config.on('rest-endpoints-update', updateAllEndpoints);
updateAllEndpoints();
_config.on('location-constraints-update', () => {
if (implName === 'multipleBackends') {
const clients = parseLC(_config, vault);
client = new MultipleBackendGateway(
clients, metadata, locationStorageCheck);
}
});

// redis client
let localCacheClient;
@@ -78,7 +93,15 @@ class S3Server {
allEndpoints,
websiteEndpoints,
blacklistedPrefixes,
dataRetrievalFn: data.get,
dataRetrievalParams: {
client,
implName,
config: _config,
kms,
metadata,
locStorageCheckFn: locationStorageCheck,
vault,
},
};
routes(req, res, params, logger, _config);
}
@@ -144,7 +167,7 @@ class S3Server {
cleanUp() {
logger.info('server shutting down');
Promise.all(this.servers.map(server =>
new Promise(resolve => server.close(resolve))
new Promise(resolve => server.close(resolve)),
)).then(() => process.exit(0));
}

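The server.js hunk above subscribes to a location-constraints-update event on the live config and rebuilds the multiple-backend data client instead of keeping the one created at startup. The enabling mechanism is simply an EventEmitter-based config object; the sketch below shows that mechanism with a hypothetical LiveConfig class and reloadLocations method (illustrative names only, not the project's actual Config API).

// Sketch, assuming a config object that behaves like an EventEmitter.
const { EventEmitter } = require('events');

class LiveConfig extends EventEmitter {
    constructor(initialLocations) {
        super();
        this.locationConstraints = initialLocations;
    }

    // Hypothetical reload hook: update state, then notify listeners.
    reloadLocations(newLocations) {
        this.locationConstraints = newLocations;
        this.emit('location-constraints-update');
    }
}

// Usage mirroring the hunk: listeners rebuild whatever depends on the
// constraints at update time instead of caching it once at startup.
const config = new LiveConfig({ 'us-east-1': { type: 'file' } });
config.on('location-constraints-update', () => {
    console.log('rebuilding data client for', Object.keys(config.locationConstraints));
});
config.reloadLocations({ 'us-east-1': { type: 'file' }, azurebackend: { type: 'azure' } });
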
@@ -42,7 +42,7 @@ const services = {
// buckets to list. By returning an empty array, the
// getService API will just respond with the user info
// without listing any buckets.
if (err && err.NoSuchBucket) {
if (err?.is.NoSuchBucket) {
log.trace('no buckets found');
// If we checked the old user bucket, that means we
// already checked the new user bucket. If neither the
@@ -65,7 +65,7 @@ const services = {
});
},

/**
/**
 * Check that hashedStream.completedHash matches header contentMd5.
 * @param {object} contentMD5 - content-md5 header
 * @param {string} completedHash - hashed stream once completed
@@ -76,7 +76,7 @@ const services = {
checkHashMatchMD5(contentMD5, completedHash, log) {
if (contentMD5 && completedHash && contentMD5 !== completedHash) {
log.debug('contentMD5 and completedHash does not match',
{ method: 'checkHashMatchMD5', completedHash, contentMD5 });
{ method: 'checkHashMatchMD5', completedHash, contentMD5 });
return false;
}
return true;
@@ -236,7 +236,7 @@ const services = {
return null;
},
callback => metadata.putObjectMD(bucketName, objectKey, md,
options, log, callback),
options, log, callback),
], (err, data) => {
if (err) {
log.error('error from metadata', { error: err });
@@ -296,12 +296,12 @@ const services = {
return cb(null, res);
}
return data.batchDelete(objectMD.location, null, null,
deleteLog, err => {
if (err) {
return cb(err);
}
return cb(null, res);
});
deleteLog, err => {
if (err) {
return cb(err);
}
return cb(null, res);
});
});
}

@@ -309,15 +309,15 @@ const services = {
if (objGetInfo && objGetInfo[0]
&& config.backends.data === 'multiple') {
return multipleBackendGateway.protectAzureBlocks(bucketName,
objectKey, objGetInfo[0].dataStoreName, log, err => {
objectKey, objGetInfo[0].dataStoreName, log, err => {
// if an error is returned, there is an MPU initiated with same
// key name as object to delete
if (err) {
return cb(err.customizeDescription('Error deleting ' +
if (err) {
return cb(err.customizeDescription('Error deleting ' +
`object on Azure: ${err.message}`));
}
return deleteMDandData();
});
}
return deleteMDandData();
});
}
return deleteMDandData();
},
@@ -485,13 +485,13 @@ const services = {
const multipartObjectMD = Object.assign({}, params.storedMetadata);
multipartObjectMD.completeInProgress = true;
metadata.putObjectMD(params.bucketName, longMPUIdentifier, multipartObjectMD,
{}, log, err => {
if (err) {
log.error('error from metadata', { error: err });
return cb(err);
}
return cb();
});
{}, log, err => {
if (err) {
log.error('error from metadata', { error: err });
return cb(err);
}
return cb();
});
},

/**
@@ -555,7 +555,7 @@ const services = {
// If the MPU was initiated, the mpu bucket should exist.
const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
metadata.getBucket(mpuBucketName, log, (err, mpuBucket) => {
if (err && err.NoSuchBucket) {
if (err?.is.NoSuchBucket) {
log.debug('bucket not found in metadata', { error: err,
method: 'services.metadataValidateMultipart' });
return cb(errors.NoSuchUpload);
@@ -577,7 +577,7 @@ const services = {
metadata.getObjectMD(mpuBucket.getName(), mpuOverviewKey,
{}, log, (err, storedMetadata) => {
if (err) {
if (err.NoSuchKey) {
if (err.is.NoSuchKey) {
return cb(errors.NoSuchUpload);
}
log.error('error from metadata', { error: err });
@@ -671,7 +671,7 @@ const services = {
 * @return {undefined}
 */
metadataStorePart(mpuBucketName, partLocations,
metaStoreParams, log, cb) {
metaStoreParams, log, cb) {
assert.strictEqual(typeof mpuBucketName, 'string');
const { partNumber, contentMD5, size, uploadId, lastModified, splitter }
= metaStoreParams;
@@ -734,7 +734,7 @@ const services = {
listParams.splitter = constants.oldSplitter;
}
metadata.listMultipartUploads(MPUbucketName, listParams, log,
cb);
cb);
return undefined;
});
},
@@ -753,7 +753,7 @@ const services = {
assert.strictEqual(typeof bucketName, 'string');
const MPUBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
metadata.getBucket(MPUBucketName, log, (err, bucket) => {
if (err && err.NoSuchBucket) {
if (err?.is.NoSuchBucket) {
log.trace('no buckets found');
const creationDate = new Date().toJSON();
const mpuBucket = new BucketInfo(MPUBucketName,

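The services.js hunks above replace guards of the form "err && err.NoSuchBucket" with "err?.is.NoSuchBucket": the code flag now lives under the error's is map, and ES2020 optional chaining stands in for the explicit "err &&" short-circuit. A small self-contained sketch of the new guard, using a plain object in place of an arsenal error:

// Sketch: `err` is either undefined (no error) or an object exposing
// boolean flags under `is`, mirroring the checks in the hunks above.
function shouldTreatAsMissingBucket(err) {
    // If err is undefined, `err?.` short-circuits the whole chain to
    // undefined instead of throwing, so no `err &&` prefix is needed.
    return Boolean(err?.is.NoSuchBucket);
}

console.log(shouldTreatAsMissingBucket(undefined));                        // false
console.log(shouldTreatAsMissingBucket({ is: { NoSuchBucket: true } }));   // true
console.log(shouldTreatAsMissingBucket({ is: { InternalError: true } }));  // false
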
@@ -241,7 +241,7 @@ aclUtils.convertToXml = grantInfo => {
`<DisplayName>${escapeForXml(ownerInfo.displayName)}` +
'</DisplayName>',
'</Owner>',
'<AccessControlList>'
'<AccessControlList>',
);

grants.forEach(grant => {
@@ -252,29 +252,29 @@ aclUtils.convertToXml = grantInfo => {
if (grant.ID) {
xml.push('<Grantee xmlns:xsi="http://www.w3.org/2001/' +
'XMLSchema-instance" xsi:type="CanonicalUser">',
`<ID>${grant.ID}</ID>`
`<ID>${grant.ID}</ID>`,
);
} else if (grant.URI) {
xml.push('<Grantee xmlns:xsi="http://www.w3.org/2001/' +
'XMLSchema-instance" xsi:type="Group">',
`<URI>${escapeForXml(grant.URI)}</URI>`
`<URI>${escapeForXml(grant.URI)}</URI>`,
);
}

if (grant.displayName) {
xml.push(`<DisplayName>${escapeForXml(grant.displayName)}` +
'</DisplayName>'
'</DisplayName>',
);
}

xml.push('</Grantee>',
`<Permission>${grant.permission}</Permission>`,
'</Grant>'
'</Grant>',
);
});

xml.push('</AccessControlList>',
'</AccessControlPolicy>'
'</AccessControlPolicy>',
);

return xml.join('');
@@ -351,7 +351,7 @@ aclUtils.getCanonicalIDs = function getCanonicalIDs(acl) {
acl.WRITE,
acl.WRITE_ACP,
acl.READ,
acl.READ_ACP
acl.READ_ACP,
);
const uniqueGrantees = Array.from(new Set(aclGrantees));
// grantees can be a mix of canonicalIDs and predefined groups in the form

@@ -70,7 +70,7 @@ function clientCheck(flightCheckOnStartUp, log, cb) {
// if there is an error from an external backend,
// only return a 500 if it is on startup
// (flightCheckOnStartUp set to true)
obj[k].error && (flightCheckOnStartUp || !obj[k].external)
obj[k].error && (flightCheckOnStartUp || !obj[k].external),
);
if (fail) {
return cb(errors.InternalError, obj);
@@ -123,7 +123,7 @@ function healthcheckHandler(clientIP, req, res, log, statsClient) {
}
const deep = (req.url === '/_/healthcheck/deep');
return routeHandler(deep, req, res, log, statsClient,
healthcheckEndHandler);
healthcheckEndHandler);
}

module.exports = {

@@ -68,12 +68,12 @@ function getSystemStats() {
idle: prev.idle + cur.idle,
irq: prev.irq + cur.irq,
}), {
user: 0,
nice: 0,
sys: 0,
idle: 0,
irq: 0,
});
user: 0,
nice: 0,
sys: 0,
idle: 0,
irq: 0,
});

return {
memory: {

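The getSystemStats hunk above only re-indents a reduce that sums per-core CPU times into a single totals object. Since the accumulator shape is easy to miss in the diff, here is a runnable sketch of the same aggregation pattern over os.cpus(); the totalCpuTimes wrapper is an illustrative name, not a function from the file.

// Sketch of the aggregation in the hunk: sum per-core CPU times from
// os.cpus() into one totals object via reduce, starting from zeroes.
const os = require('os');

function totalCpuTimes() {
    return os.cpus()
        .map(cpu => cpu.times)
        .reduce((prev, cur) => ({
            user: prev.user + cur.user,
            nice: prev.nice + cur.nice,
            sys: prev.sys + cur.sys,
            idle: prev.idle + cur.idle,
            irq: prev.irq + cur.irq,
        }), { user: 0, nice: 0, sys: 0, idle: 0, irq: 0 });
}

console.log(totalCpuTimes());
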
@@ -20,7 +20,7 @@
"homepage": "https://github.com/scality/S3#readme",
"dependencies": {
"@hapi/joi": "^17.1.0",
"arsenal": "git+https://github.com/scality/arsenal#7.10.15",
"arsenal": "git+https://github.com/scality/arsenal#7.10.23",
"async": "~2.5.0",
"aws-sdk": "2.905.0",
"azure-storage": "^2.1.0",
@@ -44,7 +44,7 @@
},
"devDependencies": {
"bluebird": "^3.3.1",
"eslint": "^2.4.0",
"eslint": "^8.14.0",
"eslint-config-airbnb": "^6.0.0",
"eslint-config-scality": "scality/Guidelines#7.10.2",
"ioredis": "4.9.5",

@@ -1,2 +1,2 @@
'use strict'; // eslint-disable-line strict
require('./test.js'); // eslint-disable-line import/no-unresolved
require('./test.js');

@@ -25,12 +25,12 @@ class BucketUtility {
Bucket: bucketName,
ObjectLockEnabledForBucket: true,
}).promise()
.then(() => bucketName);
.then(() => bucketName);
}

createMany(bucketNames) {
const promises = bucketNames.map(
bucketName => this.createOne(bucketName)
bucketName => this.createOne(bucketName),
);

return Promise.all(promises);
@@ -57,7 +57,7 @@ class BucketUtility {

deleteMany(bucketNames) {
const promises = bucketNames.map(
bucketName => this.deleteOne(bucketName)
bucketName => this.deleteOne(bucketName),
);

return Promise.all(promises);
@@ -87,7 +87,7 @@ class BucketUtility {
Key: object.Key,
VersionId: object.VersionId,
}).promise()
.then(() => object)
.then(() => object),
)
.concat(data.Versions
.filter(object => object.Key.endsWith('/'))
@@ -98,24 +98,24 @@ class BucketUtility {
Key: object.Key,
VersionId: object.VersionId,
}).promise()
.then(() => object)
)
.then(() => object),
),
)
.concat(data.DeleteMarkers
.map(object =>
this.s3.deleteObject({
Bucket: bucketName,
Key: object.Key,
VersionId: object.VersionId,
}).promise()
.then(() => object)))
)
this.s3.deleteObject({
Bucket: bucketName,
Key: object.Key,
VersionId: object.VersionId,
}).promise()
.then(() => object))),
),
);
}

emptyMany(bucketNames) {
const promises = bucketNames.map(
bucketName => this.empty(bucketName)
bucketName => this.empty(bucketName),
);

return Promise.all(promises);

@@ -63,12 +63,12 @@ function methodRequest(params, callback) {
`<Code>${code}</Code>` : '';
assert(total.indexOf(message) > -1, `Expected ${message}`);
assert.deepEqual(res.statusCode, statusCode[code],
`status code expected: ${statusCode[code]}`);
`status code expected: ${statusCode[code]}`);
}
if (headersResponse) {
Object.keys(headersResponse).forEach(key => {
assert.deepEqual(res.headers[key], headersResponse[key],
`error header: ${key}`);
`error header: ${key}`);
});
} else {
// if no headersResponse provided, should not have these headers
@@ -77,9 +77,9 @@ function methodRequest(params, callback) {
'access-control-allow-methods',
'access-control-allow-credentials',
'vary'].forEach(key => {
assert.strictEqual(res.headers[key], undefined,
assert.strictEqual(res.headers[key], undefined,
`Error: ${key} should not have value`);
});
});
}
if (headersOmitted) {
headersOmitted.forEach(key => {

@@ -23,7 +23,7 @@ function createEncryptedBucket(bucketParams, cb) {
if (bucketParams.CreateBucketConfiguration &&
bucketParams.CreateBucketConfiguration.LocationConstraint) {
locationConstraint = bucketParams.CreateBucketConfiguration
.LocationConstraint;
.LocationConstraint;
}

const prog = `${__dirname}/../../../../../bin/create_encrypted_bucket.js`;
@@ -44,23 +44,23 @@ function createEncryptedBucket(bucketParams, cb) {
}
const body = [];
const child = childProcess.spawn(args[0], args)
.on('exit', () => {
const hasSucceed = body.join('').split('\n').find(item => {
const json = safeJSONParse(item);
const test = !(json instanceof Error) && json.name === 'S3' &&
.on('exit', () => {
const hasSucceed = body.join('').split('\n').find(item => {
const json = safeJSONParse(item);
const test = !(json instanceof Error) && json.name === 'S3' &&
json.statusCode === 200;
if (test) {
return true;
if (test) {
return true;
}
return false;
});
if (!hasSucceed) {
process.stderr.write(`${body.join('')}\n`);
return cb(new Error('Cannot create encrypted bucket'));
}
return false;
});
if (!hasSucceed) {
process.stderr.write(`${body.join('')}\n`);
return cb(new Error('Cannot create encrypted bucket'));
}
return cb();
})
.on('error', cb);
return cb();
})
.on('error', cb);
child.stdout.on('data', chunk => body.push(chunk.toString()));
}

@@ -92,21 +92,21 @@ function _assertResponseHtml404(method, response, type) {
if (method === 'HEAD') {
if (type === '404-no-such-bucket') {
assert.strictEqual(response.headers['x-amz-error-code'],
'NoSuchBucket');
'NoSuchBucket');
// Need arsenal fixed to remove period at the end
// so compatible with aws
assert.strictEqual(response.headers['x-amz-error-message'],
'The specified bucket does not exist.');
'The specified bucket does not exist.');
} else if (type === '404-no-such-website-configuration') {
assert.strictEqual(response.headers['x-amz-error-code'],
'NoSuchWebsiteConfiguration');
'NoSuchWebsiteConfiguration');
assert.strictEqual(response.headers['x-amz-error-message'],
'The specified bucket does not have a website configuration');
'The specified bucket does not have a website configuration');
} else if (type === '404-not-found') {
assert.strictEqual(response.headers['x-amz-error-code'],
'NoSuchKey');
'NoSuchKey');
assert.strictEqual(response.headers['x-amz-error-message'],
'The specified key does not exist.');
'The specified key does not exist.');
} else {
throw new Error(`'${type}' is not a recognized 404 ` +
'error checked in the WebsiteConfigTester.checkHTML function');
@@ -146,9 +146,9 @@ function _assertResponseHtml403(method, response, type) {
if (method === 'HEAD') {
if (type === '403-access-denied') {
assert.strictEqual(response.headers['x-amz-error-code'],
'AccessDenied');
'AccessDenied');
assert.strictEqual(response.headers['x-amz-error-message'],
'Access Denied');
'Access Denied');
} else if (type !== '403-retrieve-error-document') {
throw new Error(`'${type}' is not a recognized 403 ` +
'error checked in the WebsiteConfigTester.checkHTML function');
@@ -163,17 +163,17 @@ function _assertResponseHtml403(method, response, type) {
]);
if (type === '403-retrieve-error-document') {
_assertResponseHtml(response.body, 'h3',
'An Error Occurred While Attempting to ' +
'An Error Occurred While Attempting to ' +
'Retrieve a Custom Error Document');
// start searching for second `ul` element after `h3` element
const startingTag = '</h3>';
const startIndex = response.body.indexOf(startingTag)
+ startingTag.length;
_assertResponseHtml(response.body.slice(startIndex),
'ul', [
'Code: AccessDenied',
'Message: Access Denied',
]);
'ul', [
'Code: AccessDenied',
'Message: Access Denied',
]);
} else if (type !== '403-access-denied') {
throw new Error(`'${type}' is not a recognized 403 ` +
'error checked in the WebsiteConfigTester.checkHTML function');
@@ -213,9 +213,9 @@ function _assertResponseHtmlRedirect(response, type, redirectUrl, method) {
// no need to check HTML
}
_assertResponseHtml(response.body, 'title',
'Best redirect link ever');
'Best redirect link ever');
_assertResponseHtml(response.body, 'h1',
'Welcome to your redirection file');
'Welcome to your redirection file');
} else {
throw new Error(`'${type}' is not a recognized redirect type ` +
'checked in the WebsiteConfigTester.checkHTML function');
@@ -327,29 +327,29 @@ class WebsiteConfigTester {

static createPutBucketWebsite(s3, bucket, bucketACL, objects, done) {
s3.createBucket({ Bucket: bucket, ACL: bucketACL },
err => {
if (err) {
return done(err);
}
const webConfig = new WebsiteConfigTester('index.html',
'error.html');
return s3.putBucketWebsite({ Bucket: bucket,
WebsiteConfiguration: webConfig }, err => {
err => {
if (err) {
return done(err);
}
return async.forEachOf(objects,
(acl, object, next) => {
s3.putObject({ Bucket: bucket,
Key: `${object}.html`,
ACL: acl,
Body: fs.readFileSync(path.join(__dirname,
`/../../test/object/websiteFiles/${object}.html`)),
},
next);
}, done);
const webConfig = new WebsiteConfigTester('index.html',
'error.html');
return s3.putBucketWebsite({ Bucket: bucket,
WebsiteConfiguration: webConfig }, err => {
if (err) {
return done(err);
}
return async.forEachOf(objects,
(acl, object, next) => {
s3.putObject({ Bucket: bucket,
Key: `${object}.html`,
ACL: acl,
Body: fs.readFileSync(path.join(__dirname,
`/../../test/object/websiteFiles/${object}.html`)),
},
next);
}, done);
});
});
});
}

static deleteObjectsThenBucket(s3, bucket, objects, done) {

@@ -179,7 +179,7 @@ withV4(sigCfg => {
assert.notStrictEqual(err, null);
assert.strictEqual(
err.statusCode,
errors.AccessDenied.code
errors.AccessDenied.code,
);
}
done();

@@ -52,7 +52,7 @@ describe('aws-sdk test delete bucket lifecycle', () => {

it('should return AccessDenied if user is not bucket owner', done => {
otherAccountS3.deleteBucketLifecycle({ Bucket: bucket },
err => assertError(err, 'AccessDenied', done));
err => assertError(err, 'AccessDenied', done));
});

it('should return no error if no lifecycle config on bucket', done => {
@@ -68,8 +68,8 @@ describe('aws-sdk test delete bucket lifecycle', () => {
s3.deleteBucketLifecycle({ Bucket: bucket }, err => {
assert.equal(err, null);
s3.getBucketLifecycleConfiguration({ Bucket: bucket },
err =>
assertError(err, 'NoSuchLifecycleConfiguration', done));
err =>
assertError(err, 'NoSuchLifecycleConfiguration', done));
});
});
});

@@ -54,7 +54,7 @@ describe('aws-sdk test delete bucket policy', () => {

it('should return MethodNotAllowed if user is not bucket owner', done => {
otherAccountS3.deleteBucketPolicy({ Bucket: bucket },
err => assertError(err, 'MethodNotAllowed', done));
err => assertError(err, 'MethodNotAllowed', done));
});

it('should return no error if no policy on bucket', done => {
@@ -69,8 +69,8 @@ describe('aws-sdk test delete bucket policy', () => {
s3.deleteBucketPolicy({ Bucket: bucket }, err => {
assert.equal(err, null);
s3.getBucketPolicy({ Bucket: bucket },
err =>
assertError(err, 'NoSuchBucketPolicy', done));
err =>
assertError(err, 'NoSuchBucketPolicy', done));
});
});
});

@@ -81,7 +81,7 @@ describe('aws-node-sdk test deleteBucketReplication', () => {
}),
next => deleteReplicationAndCheckResponse(bucket, next),
next => s3.getBucketReplication({ Bucket: bucket }, err => {
assert(errors.ReplicationConfigurationNotFoundError[err.code]);
assert(errors.ReplicationConfigurationNotFoundError.is[err.code]);
return next();
}),
], done));

@@ -43,14 +43,14 @@ describe('DELETE bucket cors', () => {
describe('without existing cors configuration', () => {
it('should return a 204 response', done => {
s3.deleteBucketCors({ Bucket: bucketName },
function deleteBucketCors(err) {
const statusCode = this.httpResponse.statusCode;
assert.strictEqual(statusCode, 204,
`Found unexpected statusCode ${statusCode}`);
assert.strictEqual(err, null,
`Found unexpected err ${err}`);
return done();
});
function deleteBucketCors(err) {
const statusCode = this.httpResponse.statusCode;
assert.strictEqual(statusCode, 204,
`Found unexpected statusCode ${statusCode}`);
assert.strictEqual(err, null,
`Found unexpected err ${err}`);
return done();
});
});
});

@@ -62,19 +62,19 @@ describe('DELETE bucket cors', () => {

it('should delete bucket configuration successfully', done => {
s3.deleteBucketCors({ Bucket: bucketName },
function deleteBucketCors(err) {
const statusCode = this.httpResponse.statusCode;
assert.strictEqual(statusCode, 204,
`Found unexpected statusCode ${statusCode}`);
assert.strictEqual(err, null,
`Found unexpected err ${err}`);
s3.getBucketCors({ Bucket: bucketName }, err => {
assert.strictEqual(err.code,
'NoSuchCORSConfiguration');
assert.strictEqual(err.statusCode, 404);
return done();
function deleteBucketCors(err) {
const statusCode = this.httpResponse.statusCode;
assert.strictEqual(statusCode, 204,
`Found unexpected statusCode ${statusCode}`);
assert.strictEqual(err, null,
`Found unexpected err ${err}`);
s3.getBucketCors({ Bucket: bucketName }, err => {
assert.strictEqual(err.code,
'NoSuchCORSConfiguration');
assert.strictEqual(err.statusCode, 404);
return done();
});
});
});
});

// Skip if AWS because AWS Node SDK raises CredentialsError
@@ -86,12 +86,12 @@ describe('DELETE bucket cors', () => {
itSkipIfAWS('should return AccessDenied if user is not bucket' +
'owner', done => {
otherAccountS3.deleteBucketCors({ Bucket: bucketName },
err => {
assert(err);
assert.strictEqual(err.code, 'AccessDenied');
assert.strictEqual(err.statusCode, 403);
return done();
});
err => {
assert(err);
assert.strictEqual(err.code, 'AccessDenied');
assert.strictEqual(err.statusCode, 403);
return done();
});
});
});
});

@@ -59,15 +59,15 @@ describe('DELETE bucket website', () => {
});

it('should return AccessDenied if user is not bucket owner',
done => {
otherAccountS3.deleteBucketWebsite({ Bucket: bucketName },
err => {
assert(err);
assert.strictEqual(err.code, 'AccessDenied');
assert.strictEqual(err.statusCode, 403);
return done();
done => {
otherAccountS3.deleteBucketWebsite({ Bucket: bucketName },
err => {
assert(err);
assert.strictEqual(err.code, 'AccessDenied');
assert.strictEqual(err.statusCode, 403);
return done();
});
});
});
});
});
});

@@ -129,7 +129,7 @@ const tests = [
Body: '{}' },
{ Bucket, Key:
'!exclamationPointObjTitle/!exclamationPointObjTitle',
Body: '{}' },
Body: '{}' },
{ Bucket, Key: '-dashObjTitle/' },
{ Bucket, Key: '-dashObjTitle/objTitleA', Body: '{}' },
{ Bucket, Key: '-dashObjTitle/-dashObjTitle', Body: '{}' },
@@ -157,7 +157,7 @@ const tests = [
Body: '{}' },
{ Bucket, Key:
'山chineseMountainObjTitle/山chineseMountainObjTitle',
Body: '{}' },
Body: '{}' },
{ Bucket, Key: 'àaGraveLowerCaseObjTitle' },
{ Bucket, Key: 'àaGraveLowerCaseObjTitle/objTitleA',
Body: '{}' },
@@ -294,17 +294,17 @@ describe('GET Bucket - AWS.S3.listObjects', () => {
before(done => {
bucketUtil = new BucketUtility();
bucketUtil.createRandom(1)
.then(created => {
bucketName = created;
done();
})
.catch(done);
.then(created => {
bucketName = created;
done();
})
.catch(done);
});

after(done => {
bucketUtil.deleteOne(bucketName)
.then(() => done())
.catch(done);
.then(() => done())
.catch(done);
});

it('should return 403 and AccessDenied on a private bucket', done => {
@@ -326,11 +326,11 @@ describe('GET Bucket - AWS.S3.listObjects', () => {
before(done => {
bucketUtil = new BucketUtility('default', sigCfg);
bucketUtil.createRandom(1)
.then(created => {
bucketName = created;
done();
})
.catch(done);
.then(created => {
bucketName = created;
done();
})
.catch(done);
});

after(done => {
@@ -492,57 +492,57 @@ describe('GET Bucket - AWS.S3.listObjects', () => {

['&', '"quot', '\'apos', '<lt', '>gt'].forEach(k => {
it(`should list objects with key ${k} as ContinuationToken`,
done => {
const s3 = bucketUtil.s3;
const Bucket = bucketName;
const objects = [{ Bucket, Key: k }];
done => {
const s3 = bucketUtil.s3;
const Bucket = bucketName;
const objects = [{ Bucket, Key: k }];

Promise
.mapSeries(objects, param => s3.putObject(param).promise())
.then(() => s3.listObjectsV2({
Bucket,
ContinuationToken: generateToken(k),
}).promise())
.then(data => {
const isValidResponse = tv4.validate(data,
bucketSchemaV2);
if (!isValidResponse) {
throw new Error(tv4.error);
}
return data;
}).then(data => {
assert.deepStrictEqual(
decryptToken(data.ContinuationToken), k);
done();
})
.catch(done);
});
Promise
.mapSeries(objects, param => s3.putObject(param).promise())
.then(() => s3.listObjectsV2({
Bucket,
ContinuationToken: generateToken(k),
}).promise())
.then(data => {
const isValidResponse = tv4.validate(data,
bucketSchemaV2);
if (!isValidResponse) {
throw new Error(tv4.error);
}
return data;
}).then(data => {
assert.deepStrictEqual(
decryptToken(data.ContinuationToken), k);
done();
})
.catch(done);
});
});

['&', '"quot', '\'apos', '<lt', '>gt'].forEach(k => {
it(`should list objects with key ${k} as NextContinuationToken`,
done => {
const s3 = bucketUtil.s3;
const Bucket = bucketName;
const objects = [{ Bucket, Key: k }, { Bucket, Key: 'zzz' }];
Promise
.mapSeries(objects, param => s3.putObject(param).promise())
.then(() => s3.listObjectsV2({ Bucket, MaxKeys: 1,
Delimiter: 'foo' }).promise())
.then(data => {
const isValidResponse = tv4.validate(data,
bucketSchemaV2);
if (!isValidResponse) {
throw new Error(tv4.error);
}
return data;
}).then(data => {
assert.strictEqual(
decryptToken(data.NextContinuationToken), k);
done();
})
.catch(done);
});
done => {
const s3 = bucketUtil.s3;
const Bucket = bucketName;
const objects = [{ Bucket, Key: k }, { Bucket, Key: 'zzz' }];
Promise
.mapSeries(objects, param => s3.putObject(param).promise())
.then(() => s3.listObjectsV2({ Bucket, MaxKeys: 1,
Delimiter: 'foo' }).promise())
.then(data => {
const isValidResponse = tv4.validate(data,
bucketSchemaV2);
if (!isValidResponse) {
throw new Error(tv4.error);
}
return data;
}).then(data => {
assert.strictEqual(
decryptToken(data.NextContinuationToken), k);
done();
})
.catch(done);
});
});
});
});

@@ -56,7 +56,7 @@ describe('aws-sdk test get bucket encryption', () => {

it('should include KMSMasterKeyID if user has configured a custom master key', done => {
setEncryptionInfo({ cryptoScheme: 1, algorithm: 'aws:kms', masterKeyId: '12345',
configuredMasterKeyId: '54321', mandatory: true }, err => {
configuredMasterKeyId: '54321', mandatory: true }, err => {
assert.ifError(err);
s3.getBucketEncryption({ Bucket: bucketName }, (err, res) => {
assert.ifError(err);

@@ -44,7 +44,7 @@ describe('aws-sdk test get bucket lifecycle', () => {

it('should return AccessDenied if user is not bucket owner', done => {
otherAccountS3.getBucketLifecycleConfiguration({ Bucket: bucket },
err => assertError(err, 'AccessDenied', done));
err => assertError(err, 'AccessDenied', done));
});

it('should return NoSuchLifecycleConfiguration error if no lifecycle ' +
@@ -68,20 +68,20 @@ describe('aws-sdk test get bucket lifecycle', () => {
}, err => {
assert.equal(err, null, `Err putting lifecycle config: ${err}`);
s3.getBucketLifecycleConfiguration({ Bucket: bucket },
(err, res) => {
assert.equal(err, null, 'Error getting lifecycle config: ' +
(err, res) => {
assert.equal(err, null, 'Error getting lifecycle config: ' +
`${err}`);
assert.strictEqual(res.Rules.length, 1);
assert.deepStrictEqual(res.Rules[0], {
Expiration: { Days: 1 },
ID: 'test-id',
Prefix: '',
Status: 'Enabled',
Transitions: [],
NoncurrentVersionTransitions: [],
assert.strictEqual(res.Rules.length, 1);
assert.deepStrictEqual(res.Rules[0], {
Expiration: { Days: 1 },
ID: 'test-id',
Prefix: '',
Status: 'Enabled',
Transitions: [],
NoncurrentVersionTransitions: [],
});
done();
});
done();
});
}));

it('should get bucket lifecycle config with filter prefix', done =>
@@ -98,71 +98,71 @@ describe('aws-sdk test get bucket lifecycle', () => {
}, err => {
assert.equal(err, null, `Err putting lifecycle config: ${err}`);
s3.getBucketLifecycleConfiguration({ Bucket: bucket },
(err, res) => {
assert.equal(err, null, 'Error getting lifecycle config: ' +
(err, res) => {
assert.equal(err, null, 'Error getting lifecycle config: ' +
`${err}`);
assert.strictEqual(res.Rules.length, 1);
assert.deepStrictEqual(res.Rules[0], {
Expiration: { Days: 1 },
ID: 'test-id',
Filter: { Prefix: '' },
Status: 'Enabled',
Transitions: [],
NoncurrentVersionTransitions: [],
assert.strictEqual(res.Rules.length, 1);
assert.deepStrictEqual(res.Rules[0], {
Expiration: { Days: 1 },
ID: 'test-id',
Filter: { Prefix: '' },
Status: 'Enabled',
Transitions: [],
NoncurrentVersionTransitions: [],
});
done();
});
done();
});
}));

it('should get bucket lifecycle config with filter prefix and tags',
done =>
s3.putBucketLifecycleConfiguration({
Bucket: bucket,
LifecycleConfiguration: {
Rules: [{
ID: 'test-id',
Status: 'Enabled',
Filter: {
And: {
Prefix: '',
Tags: [
{
Key: 'key',
Value: 'value',
},
],
done =>
s3.putBucketLifecycleConfiguration({
Bucket: bucket,
LifecycleConfiguration: {
Rules: [{
ID: 'test-id',
Status: 'Enabled',
Filter: {
And: {
Prefix: '',
Tags: [
{
Key: 'key',
Value: 'value',
},
],
},
},
},
Expiration: { Days: 1 },
}],
},
}, err => {
assert.equal(err, null, `Err putting lifecycle config: ${err}`);
s3.getBucketLifecycleConfiguration({ Bucket: bucket },
(err, res) => {
assert.equal(err, null, 'Error getting lifecycle config: ' +
Expiration: { Days: 1 },
}],
},
}, err => {
assert.equal(err, null, `Err putting lifecycle config: ${err}`);
s3.getBucketLifecycleConfiguration({ Bucket: bucket },
(err, res) => {
assert.equal(err, null, 'Error getting lifecycle config: ' +
`${err}`);
assert.strictEqual(res.Rules.length, 1);
assert.deepStrictEqual(res.Rules[0], {
Expiration: { Days: 1 },
ID: 'test-id',
Filter: {
And: {
Prefix: '',
Tags: [
{
Key: 'key',
Value: 'value',
assert.strictEqual(res.Rules.length, 1);
assert.deepStrictEqual(res.Rules[0], {
Expiration: { Days: 1 },
ID: 'test-id',
Filter: {
And: {
Prefix: '',
Tags: [
{
Key: 'key',
Value: 'value',
},
],
},
],
},
},
Status: 'Enabled',
Transitions: [],
NoncurrentVersionTransitions: [],
});
done();
});
}));
},
Status: 'Enabled',
Transitions: [],
NoncurrentVersionTransitions: [],
});
done();
});
}));
});
});

@@ -48,10 +48,10 @@ describe('aws-sdk test get bucket notification', () => {

it('should return AccessDenied if user is not bucket owner', done => {
otherAccountS3.getBucketNotificationConfiguration({ Bucket: bucket },
err => {
assertError(err, 'AccessDenied');
done();
});
err => {
assertError(err, 'AccessDenied');
done();
});
});

it('should not return an error if no notification configuration ' +
@@ -69,11 +69,11 @@ describe('aws-sdk test get bucket notification', () => {
}, err => {
assert.equal(err, null, `Err putting notification config: ${err}`);
s3.getBucketNotificationConfiguration({ Bucket: bucket },
(err, res) => {
assert.equal(err, null, `Error getting notification config: ${err}`);
assert.deepStrictEqual(res.QueueConfigurations, notificationConfig.QueueConfigurations);
done();
});
(err, res) => {
assert.equal(err, null, `Error getting notification config: ${err}`);
assert.deepStrictEqual(res.QueueConfigurations, notificationConfig.QueueConfigurations);
done();
});
});
});
});

@@ -55,15 +55,15 @@ describe('aws-sdk test get bucket policy', () => {

it('should return MethodNotAllowed if user is not bucket owner', done => {
otherAccountS3.getBucketPolicy({ Bucket: bucket },
err => assertError(err, 'MethodNotAllowed', done));
err => assertError(err, 'MethodNotAllowed', done));
});

it('should return NoSuchBucketPolicy error if no policy put to bucket',
done => {
s3.getBucketPolicy({ Bucket: bucket }, err => {
assertError(err, 'NoSuchBucketPolicy', done);
done => {
s3.getBucketPolicy({ Bucket: bucket }, err => {
assertError(err, 'NoSuchBucketPolicy', done);
});
});
});

it('should get bucket policy', done => {
s3.putBucketPolicy({
@@ -72,13 +72,13 @@ describe('aws-sdk test get bucket policy', () => {
}, err => {
assert.equal(err, null, `Err putting bucket policy: ${err}`);
s3.getBucketPolicy({ Bucket: bucket },
(err, res) => {
const parsedRes = JSON.parse(res.Policy);
assert.equal(err, null, 'Error getting bucket policy: ' +
(err, res) => {
const parsedRes = JSON.parse(res.Policy);
assert.equal(err, null, 'Error getting bucket policy: ' +
`${err}`);
assert.deepStrictEqual(parsedRes.Statement[0], expectedPolicy);
done();
});
assert.deepStrictEqual(parsedRes.Statement[0], expectedPolicy);
done();
});
});
});
});

@@ -45,7 +45,7 @@ describe('aws-node-sdk test getBucketReplication', () => {
it("should return 'ReplicationConfigurationNotFoundError' if bucket does " +
'not have a replication configuration', done =>
s3.getBucketReplication({ Bucket: bucket }, err => {
assert(errors.ReplicationConfigurationNotFoundError[err.code]);
assert(errors.ReplicationConfigurationNotFoundError.is[err.code]);
return done();
}));

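The replication test hunks above now index the error's is map (errors.ReplicationConfigurationNotFoundError.is[err.code]) instead of the error object itself when matching the code reported by the AWS SDK. A hedged sketch of why the old assertion stops holding once the code flags move under is; the objects below are stand-ins, not the real arsenal error instances.

// Stand-in for the arsenal error: code flags live under `is`.
const replicationNotFound = {
    code: 404,
    is: { ReplicationConfigurationNotFoundError: true },
};

// err.code as the AWS SDK reports it for this failure:
const sdkErr = { code: 'ReplicationConfigurationNotFoundError' };

// Old assertion: looked for the flag directly on the error object.
console.log(Boolean(replicationNotFound[sdkErr.code]));    // false once flags live under `is`
// New assertion from the diff: looks the flag up in the `is` map.
console.log(Boolean(replicationNotFound.is[sdkErr.code])); // true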