Compare commits
9 Commits
developmen ... improvemen
Author | SHA1 | Date |
---|---|---|
Jordi Bertran de Balanda | 9b92a50ca7 | |
Jordi Bertran de Balanda | a6230f5538 | |
Ronnie Smith | 3bfcf624bf | |
Jordi Bertran de Balanda | bd9209ef5e | |
Jordi Bertran de Balanda | 371cb689af | |
Xin LI | 674860ef8a | |
Xin LI | ce28e08d3e | |
Xin LI | 67df4fa207 | |
Xin LI | 4100ac73b2 | |
@@ -1 +1,6 @@
- { "extends": "scality" }
+ {
+     "extends": "scality",
+     "parserOptions": {
+         "ecmaVersion": 2020
+     }
+ }
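The added parserOptions block raises the ESLint parser to ES2020. That matters for the hunks further down: several error checks are rewritten with optional chaining (for example `err?.is.NoSuchBucket`), which is ES2020 syntax and would not parse under the previous parser setting.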
@@ -33,7 +33,7 @@ RUN cd /tmp \
&& rm -rf /tmp/Python-$PY_VERSION.tgz

RUN yarn cache clean \
&& yarn install --production --ignore-optional --ignore-engines --network-concurrency 1 \
&& apt-get autoremove --purge -y python git build-essential \
&& rm -rf /var/lib/apt/lists/* \
&& yarn cache clean \
@@ -27,7 +27,7 @@ if (config.backends.data === 'file' ||
dataServer.setup(err => {
if (err) {
logger.error('Error initializing REST data server',
{ error: err });
return;
}
dataServer.start();
128 lib/Config.js
@@ -41,10 +41,10 @@ function restEndpointsAssert(restEndpoints, locationConstraints) {
'bad config: restEndpoints must be an object of endpoints');
assert(Object.keys(restEndpoints).every(
r => typeof restEndpoints[r] === 'string'),
'bad config: each endpoint must be a string');
assert(Object.keys(restEndpoints).every(
r => typeof locationConstraints[restEndpoints[r]] === 'object'),
'bad config: rest endpoint target not in locationConstraints');
}

function gcpLocationConstraintAssert(location, locationObj) {
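For context, the assertions above require restEndpoints to map each endpoint name to a location name that exists in locationConstraints. A minimal sketch of the expected shape, written as a JS object; the endpoint names, location name, and keys such as `type` are illustrative, not taken from this diff:

```js
// Hypothetical excerpt of the S3 config, shown as a JS object literal.
// Every restEndpoints value must be a string naming an entry in
// locationConstraints, and every location entry must be an object
// with a boolean legacyAwsBehavior and an object details field.
const restEndpoints = {
    'localhost': 'us-east-1',
    's3.example.com': 'us-east-1',
};
const locationConstraints = {
    'us-east-1': {
        type: 'file',            // assumed key; backend must be a supported type
        legacyAwsBehavior: true, // mandatory boolean per locationConstraintAssert
        details: {},             // mandatory object per locationConstraintAssert
    },
};
```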
@@ -93,14 +93,14 @@ function gcpLocationConstraintAssert(location, locationObj) {
'serviceKey must be set in locationConfig or environment variable');
if (keyFilename) {
assert.strictEqual(typeof keyFilename, 'string',
`bad location constriant: "${location}" serviceCredentials ` +
`keyFilename "${keyFilename}" must be a string`);
} else {
assert.strictEqual(typeof serviceEmail, 'string',
`bad location constriant: "${location}" serviceCredentials ` +
`serviceEmail "${serviceEmail}" must be a string`);
assert.strictEqual(typeof serviceKey, 'string',
`bad location constriant: "${location}"" serviceCredentials ` +
`serviceKey "${serviceKey}" must be a string`);
}
}

@@ -161,17 +161,17 @@ function locationConstraintAssert(locationConstraints) {
`be one of ${supportedBackends}`);
assert(typeof locationConstraints[l].legacyAwsBehavior
=== 'boolean',
'bad config: locationConstraints[region]' +
'.legacyAwsBehavior is mandatory and must be a boolean');
if (locationConstraints[l].details.serverSideEncryption !== undefined) {
assert(typeof locationConstraints[l].details.serverSideEncryption
=== 'boolean',
'bad config: locationConstraints[region]' +
'.details.serverSideEncryption must be a boolean');
}
assert(typeof locationConstraints[l].details
=== 'object',
'bad config: locationConstraints[region].details is ' +
'mandatory and must be an object');
const details = locationConstraints[l].details;
const stringFields = [

@@ -265,7 +265,7 @@ function parseUtapiReindex({ enabled, schedule, sentinel, bucketd }) {
function requestsConfigAssert(requestsConfig) {
if (requestsConfig.viaProxy !== undefined) {
assert(typeof requestsConfig.viaProxy === 'boolean',
'config: invalid requests configuration. viaProxy must be a ' +
'boolean');

if (requestsConfig.viaProxy) {

@@ -328,7 +328,7 @@ class Config extends EventEmitter {
this.configPath = process.env.S3_CONFIG_FILE;
}
this.locationConfigPath = path.join(__dirname,
'../locationConfig.json');
if (process.env.CI === 'true' && !process.env.S3_END_TO_END) {
this.locationConfigPath = path.join(__dirname,
'../tests/locationConfig/locationConfigTests.json');

@@ -347,7 +347,7 @@ class Config extends EventEmitter {
let locationConfig;
try {
const data = fs.readFileSync(this.locationConfigPath,
{ encoding: 'utf-8' });
locationConfig = JSON.parse(data);
} catch (err) {
throw new Error(`could not parse location config file:

@@ -380,8 +380,8 @@ class Config extends EventEmitter {
'bad config: TLS file specification must be a string');
}
const tlsFilePath = (tlsFileName[0] === '/')
? tlsFileName
: path.join(this._basepath, tlsFileName);
let tlsFileContent;
try {
tlsFileContent = fs.readFileSync(tlsFilePath);

@@ -396,7 +396,7 @@ class Config extends EventEmitter {
let config;
try {
const data = fs.readFileSync(this.configPath,
{ encoding: 'utf-8' });
config = JSON.parse(data);
} catch (err) {
throw new Error(`could not parse config file: ${err.message}`);

@@ -413,7 +413,7 @@ class Config extends EventEmitter {
if (config.listenOn !== undefined) {
assert(Array.isArray(config.listenOn)
&& config.listenOn.every(e => typeof e === 'string'),
'bad config: listenOn must be a list of strings');
config.listenOn.forEach(item => {
const lastColon = item.lastIndexOf(':');
// if address is IPv6 format, it includes brackets

@@ -504,14 +504,14 @@ class Config extends EventEmitter {
if (config.websiteEndpoints !== undefined) {
assert(Array.isArray(config.websiteEndpoints)
&& config.websiteEndpoints.every(e => typeof e === 'string'),
'bad config: websiteEndpoints must be a list of strings');
this.websiteEndpoints = config.websiteEndpoints;
}

this.clusters = false;
if (config.clusters !== undefined) {
assert(Number.isInteger(config.clusters) && config.clusters > 0,
'bad config: clusters must be a positive integer');
this.clusters = config.clusters;
}
@@ -529,27 +529,27 @@ class Config extends EventEmitter {
if (config.cdmi !== undefined) {
if (config.cdmi.host !== undefined) {
assert.strictEqual(typeof config.cdmi.host, 'string',
'bad config: cdmi host must be a string');
this.cdmi.host = config.cdmi.host;
}
if (config.cdmi.port !== undefined) {
assert(Number.isInteger(config.cdmi.port)
&& config.cdmi.port > 0,
'bad config: cdmi port must be a positive integer');
this.cdmi.port = config.cdmi.port;
}
if (config.cdmi.path !== undefined) {
assert(typeof config.cdmi.path === 'string',
'bad config: cdmi.path must be a string');
assert(config.cdmi.path.length > 0,
'bad config: cdmi.path is empty');
assert(config.cdmi.path.charAt(0) === '/',
'bad config: cdmi.path should start with a "/"');
this.cdmi.path = config.cdmi.path;
}
if (config.cdmi.readonly !== undefined) {
assert(typeof config.cdmi.readonly === 'boolean',
'bad config: cdmi.readonly must be a boolean');
this.cdmi.readonly = config.cdmi.readonly;
} else {
this.cdmi.readonly = true;
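A cdmi block passing the validation above would look roughly like this; the values are made up for illustration, only the types and the leading slash on path are enforced by the assertions:

```js
// Hypothetical cdmi section of the config, as a JS object literal.
const cdmiConfigExample = {
    cdmi: {
        host: 'localhost',  // must be a string
        port: 81,           // must be a positive integer
        path: '/cdmi',      // must be a non-empty string starting with '/'
        readonly: true,     // must be a boolean; defaults to true when omitted
    },
};
```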
@@ -562,7 +562,7 @@ class Config extends EventEmitter {
assert(config.bucketd.bootstrap instanceof Array
&& config.bucketd.bootstrap.every(
e => typeof e === 'string'),
'bad config: bucketd.bootstrap must be a list of strings');
this.bucketd.bootstrap = config.bucketd.bootstrap;
}

@@ -571,12 +571,12 @@ class Config extends EventEmitter {
if (config.vaultd.port !== undefined) {
assert(Number.isInteger(config.vaultd.port)
&& config.vaultd.port > 0,
'bad config: vaultd port must be a positive integer');
this.vaultd.port = config.vaultd.port;
}
if (config.vaultd.host !== undefined) {
assert.strictEqual(typeof config.vaultd.host, 'string',
'bad config: vaultd host must be a string');
this.vaultd.host = config.vaultd.host;
}
}

@@ -584,13 +584,13 @@ class Config extends EventEmitter {
if (config.dataClient) {
this.dataClient = {};
assert.strictEqual(typeof config.dataClient.host, 'string',
'bad config: data client host must be ' +
'a string');
this.dataClient.host = config.dataClient.host;

assert(Number.isInteger(config.dataClient.port)
&& config.dataClient.port > 0,
'bad config: dataClient port must be a positive ' +
'integer');
this.dataClient.port = config.dataClient.port;
}

@@ -604,7 +604,7 @@ class Config extends EventEmitter {

assert(Number.isInteger(config.metadataClient.port)
&& config.metadataClient.port > 0,
'bad config: metadata client port must be a ' +
'positive integer');
this.metadataClient.port = config.metadataClient.port;
}

@@ -618,7 +618,7 @@ class Config extends EventEmitter {

assert(Number.isInteger(config.dataDaemon.port)
&& config.dataDaemon.port > 0,
'bad config: data daemon port must be a positive ' +
'integer');
this.dataDaemon.port = config.dataDaemon.port;

@@ -629,7 +629,7 @@ class Config extends EventEmitter {
*/
this.dataDaemon.dataPath =
process.env.S3DATAPATH ?
process.env.S3DATAPATH : `${__dirname}/../localData`;
}

if (config.metadataDaemon) {

@@ -642,7 +642,7 @@ class Config extends EventEmitter {

assert(Number.isInteger(config.metadataDaemon.port)
&& config.metadataDaemon.port > 0,
'bad config: metadata daemon port must be a ' +
'positive integer');
this.metadataDaemon.port = config.metadataDaemon.port;

@@ -653,7 +653,7 @@ class Config extends EventEmitter {
*/
this.metadataDaemon.metadataPath =
process.env.S3METADATAPATH ?
process.env.S3METADATAPATH : `${__dirname}/../localMetadata`;

this.metadataDaemon.restEnabled =
config.metadataDaemon.restEnabled;

@@ -699,7 +699,7 @@ class Config extends EventEmitter {
this.redis.name = config.redis.name;
assert(Array.isArray(config.redis.sentinels) ||
typeof config.redis.sentinels === 'string',
'bad config: redis sentinels must be an array or string');

if (typeof config.redis.sentinels === 'string') {
config.redis.sentinels.split(',').forEach(item => {

@@ -720,7 +720,7 @@ class Config extends EventEmitter {

if (config.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(config.redis.sentinelPassword));
this.redis.sentinelPassword = config.redis.sentinelPassword;
}
} else {

@@ -751,14 +751,14 @@ class Config extends EventEmitter {
if (config.utapi.port) {
assert(Number.isInteger(config.utapi.port)
&& config.utapi.port > 0,
'bad config: utapi port must be a positive integer');
this.utapi.port = config.utapi.port;
}
if (utapiVersion === 1) {
if (config.utapi.workers !== undefined) {
assert(Number.isInteger(config.utapi.workers)
&& config.utapi.workers > 0,
'bad config: utapi workers must be a positive integer');
this.utapi.workers = config.utapi.workers;
}
// Utapi uses the same localCache config defined for S3 to avoid

@@ -804,7 +804,7 @@ class Config extends EventEmitter {
}
if (config.utapi.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(config.utapi.redis.sentinelPassword),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.sentinelPassword =

@@ -814,15 +814,15 @@ class Config extends EventEmitter {
if (config.utapi.redis.retry.connectBackoff !== undefined) {
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
assert.strictEqual(typeof min, 'number',
'utapi.redis.retry.connectBackoff: min must be a number');
assert.strictEqual(typeof max, 'number',
'utapi.redis.retry.connectBackoff: max must be a number');
assert.strictEqual(typeof jitter, 'number',
'utapi.redis.retry.connectBackoff: jitter must be a number');
assert.strictEqual(typeof factor, 'number',
'utapi.redis.retry.connectBackoff: factor must be a number');
assert.strictEqual(typeof deadline, 'number',
'utapi.redis.retry.connectBackoff: deadline must be a number');
}

this.utapi.redis.retry = config.utapi.redis.retry;
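The new assertions only require that every connectBackoff field is a number. A plausible shape, with made-up values; the comments describe the usual meaning of such backoff parameters, which this diff does not itself spell out:

```js
// Hypothetical utapi.redis.retry section of the config.
const utapiRedisRetryExample = {
    retry: {
        connectBackoff: {
            min: 10,         // assumed: minimum delay between connection attempts
            max: 1000,       // assumed: maximum delay
            jitter: 0.1,     // assumed: randomization applied to each delay
            factor: 1.5,     // assumed: exponential growth factor
            deadline: 10000, // assumed: stop retrying after this long
        },
    },
};
```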
@@ -924,8 +924,8 @@ class Config extends EventEmitter {
`bad config: utapi.filter.${state}.${field} must be an array of strings`);
utapiResourceFilters[field] = { [state]: new Set(resources) };
}
- }
+ },
));
this.utapi.filter = utapiResourceFilters;
}
}
@@ -934,12 +934,12 @@ class Config extends EventEmitter {
if (config.log !== undefined) {
if (config.log.logLevel !== undefined) {
assert(typeof config.log.logLevel === 'string',
'bad config: log.logLevel must be a string');
this.log.logLevel = config.log.logLevel;
}
if (config.log.dumpLevel !== undefined) {
assert(typeof config.log.dumpLevel === 'string',
'bad config: log.dumpLevel must be a string');
this.log.dumpLevel = config.log.dumpLevel;
}
}

@@ -1008,8 +1008,8 @@ class Config extends EventEmitter {
cert: this._loadTlsFile(process.env.S3KMIP_CERT ||
undefined),
ca: (process.env.S3KMIP_CA
? process.env.S3KMIP_CA.split(',')
: []).map(this._loadTlsFile),
},
},
};

@@ -1044,12 +1044,12 @@ class Config extends EventEmitter {
}
if (port) {
assert(typeof port === 'number',
'bad config: KMIP TLS Port must be a number');
this.kmip.transport.tls.port = port;
}
if (host) {
assert(typeof host === 'string',
'bad config: KMIP TLS Host must be a string');
this.kmip.transport.tls.host = host;
}

@@ -1075,7 +1075,7 @@ class Config extends EventEmitter {
'be an array');
config.healthChecks.allowFrom.forEach(item => {
assert(typeof item === 'string',
'config: invalid healthcheck configuration. allowFrom IP ' +
'address must be a string');
});
this.healthChecks.allowFrom = defaultHealthChecks.allowFrom

@@ -1086,10 +1086,10 @@ class Config extends EventEmitter {
assert(typeof config.certFilePaths === 'object' &&
typeof config.certFilePaths.key === 'string' &&
typeof config.certFilePaths.cert === 'string' && ((
config.certFilePaths.ca &&
typeof config.certFilePaths.ca === 'string') ||
- !config.certFilePaths.ca)
+ !config.certFilePaths.ca),
);
}
const { key, cert, ca } = config.certFilePaths ?
config.certFilePaths : {};

@@ -1102,14 +1102,14 @@ class Config extends EventEmitter {
capath = (ca[0] === '/') ? ca : `${this._basePath}/${ca}`;
assert.doesNotThrow(() =>
fs.accessSync(capath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${capath}`);
}
assert.doesNotThrow(() =>
fs.accessSync(keypath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${keypath}`);
assert.doesNotThrow(() =>
fs.accessSync(certpath, fs.F_OK | fs.R_OK),
`File not found or unreachable: ${certpath}`);
this.https = {
cert: fs.readFileSync(certpath, 'ascii'),
key: fs.readFileSync(keypath, 'ascii'),

@@ -1174,7 +1174,7 @@ class Config extends EventEmitter {
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
'bad environment variable: S3BACKEND environment variable ' +
- 'should be one of mem/file/scality/cdmi'
+ 'should be one of mem/file/scality/cdmi',
);
auth = process.env.S3BACKEND;
data = process.env.S3BACKEND;

@@ -1195,8 +1195,8 @@ class Config extends EventEmitter {
if (process.env.SCALITY_ACCESS_KEY_ID &&
process.env.SCALITY_SECRET_ACCESS_KEY) {
authData = buildAuthDataAccount(
process.env.SCALITY_ACCESS_KEY_ID,
process.env.SCALITY_SECRET_ACCESS_KEY);
} else {
authData = require(authfile);
}

@@ -1209,7 +1209,7 @@ class Config extends EventEmitter {
const validData = ['mem', 'file', 'scality', 'multiple'];
assert(validData.indexOf(process.env.S3DATA) > -1,
'bad environment variable: S3DATA environment variable ' +
- 'should be one of mem/file/scality/multiple'
+ 'should be one of mem/file/scality/multiple',
);
data = process.env.S3DATA;
}

@@ -1218,7 +1218,7 @@ class Config extends EventEmitter {
}
assert(this.locationConstraints !== undefined &&
this.restEndpoints !== undefined,
- 'bad config: locationConstraints and restEndpoints must be set'
+ 'bad config: locationConstraints and restEndpoints must be set',
);

if (process.env.S3METADATA) {

@@ -1319,7 +1319,7 @@ class Config extends EventEmitter {

isAWSServerSideEncryption(locationConstraint) {
return this.locationConstraints[locationConstraint].details
.serverSideEncryption === true;
}

getGcpServiceParams(locationConstraint) {
@@ -167,8 +167,8 @@ const api = {
returnTagCount = checkedResults;
}
return tagConditionKeyAuth(authorizationResults, request, requestContexts, apiMethod, log,
(err, tagAuthResults, updatedContexts) =>
next(err, tagAuthResults, authorizationResults, userInfo, streamingV4Params, updatedContexts));
},
], (err, tagAuthResults, authorizationResults, userInfo, streamingV4Params, updatedContexts) => {
if (err) {
@@ -225,29 +225,29 @@ const api = {

// IAM policy -Tag condition keys require information from CloudServer for evaluation
return tagConditionKeyAuth(authorizationResults, request, (updatedContexts || requestContexts),
apiMethod, log, (err, tagAuthResults) => {
if (err) {
log.trace('tag authentication error', { error: err });
return callback(err);
}
if (tagAuthResults) {
const checkedResults = checkAuthResults(tagAuthResults);
if (checkedResults instanceof Error) {
return callback(checkedResults);
}
returnTagCount = checkedResults;
}
if (apiMethod === 'objectCopy' ||
apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, callback);
}
if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request,
returnTagCount, log, callback);
}
return this[apiMethod](userInfo, request, log, callback);
});
});
return undefined;
});
@@ -68,7 +68,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet';
const reqQuery = Object.assign({}, request.query,
{ versionId: sourceVersionId });
const getRequestContext = new RequestContext(request.headers,

@@ -96,7 +96,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
|| apiMethodAfterVersionCheck === 'objectGetVersion') {
const objectGetTaggingAction = (request.query &&
request.query.versionId) ? 'objectGetTaggingVersion' :
'objectGetTagging';
const getRequestContext =
generateRequestContext(apiMethodAfterVersionCheck);
const getTaggingRequestContext =
@@ -47,20 +47,20 @@ function updateRequestContexts(request, requestContexts, apiMethod, log, cb) {
}
const reqVersionId = decodedVidResult;
return metadata.getObjectMD(bucketName, objectKey, { versionId: reqVersionId }, log,
(err, objMD) => {
if (err) {
- if (err.NoSuchKey) {
+ if (err.is.NoSuchKey) {
return next();
}
log.trace('error getting request object tags');
return next(err);
}
const existingTags = objMD.tags;
if (existingTags) {
rc.setExistingObjTag(makeTagQuery(existingTags));
}
return next();
});
},
], err => {
if (err) {
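Several hunks in this compare replace ad-hoc error-name checks (`err.NoSuchKey`, `err === errors.BadRequest`) with a lookup on the error's `is` map, as the diff above shows. A minimal sketch of the pattern, assuming an Arsenal-style error object that exposes a boolean map under `err.is`:

```js
// Old style: relies on a truthy property named after the error code.
if (err && err.NoSuchKey) {
    return next();
}

// New style used in this branch: a dedicated `is` map on the error object,
// so the check cannot be satisfied by an unrelated truthy property.
if (err && err.is.NoSuchKey) {
    return next();
}
```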
@@ -93,7 +93,7 @@ const _validator = {
validateNumberRules(length) {
if (length > 100) {
return errors.InvalidRequest
.customizeDescription(customizedErrs.numberRules);
}
return true;
},

@@ -323,20 +323,20 @@ function parseCorsXml(xml, log, cb) {
function convertToXml(arrayRules) {
const xml = [];
xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
'<CORSConfiguration>');
arrayRules.forEach(rule => {
xml.push('<CORSRule>');
['allowedMethods', 'allowedOrigins', 'allowedHeaders', 'exposeHeaders']
.forEach(key => {
if (rule[key]) {
const element = key.charAt(0).toUpperCase() +
key.slice(1, -1);
rule[key].forEach(value => {
xml.push(`<${element}>${escapeForXml(value)}` +
`</${element}>`);
});
}
});
if (rule.id) {
xml.push(`<ID>${escapeForXml(rule.id)}</ID>`);
}

@@ -22,7 +22,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {

// Get new format usersBucket to see if it exists
return metadata.getBucket(usersBucket, log, (err, usersBucketAttrs) => {
- if (err && !err.NoSuchBucket && !err.BucketAlreadyExists) {
+ if (err && !err.is.NoSuchBucket && !err.is.BucketAlreadyExists) {
return cb(err);
}
const splitter = usersBucketAttrs ?

@@ -36,7 +36,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
usersBucket : oldUsersBucket;
return metadata.putObjectMD(usersBucketBeingCalled, key,
omVal, {}, log, err => {
- if (err && err.NoSuchBucket) {
+ if (err?.is.NoSuchBucket) {
// There must be no usersBucket so createBucket
// one using the new format
log.trace('users bucket does not exist, ' +

@@ -57,8 +57,7 @@ function addToUsersBucket(canonicalID, bucketName, log, cb) {
// error with respect
// to the usersBucket.
if (err &&
- err !==
- errors.BucketAlreadyExists) {
+ !err.is.BucketAlreadyExists) {
log.error('error from metadata', {
error: err,
});
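Where the old code guarded with `err && err.NoSuchBucket`, the new code collapses the guard and the lookup into optional chaining, which the ES2020 parser setting added at the top of this compare makes lintable:

```js
// Before: explicit null guard plus error-name property.
if (err && err.NoSuchBucket) {
    return cb();
}

// After: optional chaining only reaches err.is when err is defined.
if (err?.is.NoSuchBucket) {
    return cb();
}
```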
@@ -206,7 +205,7 @@ function createBucket(authInfo, bucketName, headers,
},
getAnyExistingBucketInfo: function getAnyExistingBucketInfo(callback) {
metadata.getBucket(bucketName, log, (err, data) => {
- if (err && err.NoSuchBucket) {
+ if (err?.is.NoSuchBucket) {
return callback(null, 'NoBucketYet');
}
if (err) {

@@ -241,7 +240,7 @@ function createBucket(authInfo, bucketName, headers,
'new bucket without flags; adding transient label');
newBucketMD.addTransientFlag();
return freshStartCreateBucket(newBucketMD, canonicalID,
log, cb);
});
}
if (existingBucketMD.hasTransientFlag() ||

@@ -16,7 +16,7 @@ function _deleteMPUbucket(destinationBucketName, log, cb) {
`${mpuBucketPrefix}${destinationBucketName}`;
return metadata.deleteBucket(mpuBucketName, log, err => {
// If the mpu bucket does not exist, just move on
- if (err && err.NoSuchBucket) {
+ if (err?.is.NoSuchBucket) {
return cb();
}
return cb(err);

@@ -90,7 +90,7 @@ function deleteBucket(authInfo, bucketMD, bucketName, canonicalID, log, cb) {
log, (err, objectsListRes) => {
// If no shadow bucket ever created, no ongoing MPU's, so
// continue with deletion
- if (err && err.NoSuchBucket) {
+ if (err?.is.NoSuchBucket) {
return next();
}
if (err) {

@@ -25,9 +25,9 @@ function bucketShield(bucket, requestType) {
invisiblyDelete(bucket.getName(), bucket.getOwner());
return true;
}
// If request is initiateMultipartUpload (requestType objectPut),
// objectPut, bucketPutACL or bucketDelete, proceed with request.
// Otherwise return an error to the client
if ((bucket.hasDeletedFlag() || bucket.hasTransientFlag()) &&
(requestType !== 'objectPut' &&
requestType !== 'bucketPutACL' &&

@@ -385,25 +385,25 @@ function convertToXml(config) {
'"http://s3.amazonaws.com/doc/2006-03-01/">');
if (indexDocument) {
xml.push('<IndexDocument>',
`<Suffix>${escapeForXml(indexDocument)}</Suffix>`,
'</IndexDocument>');
}
if (errorDocument) {
xml.push('<ErrorDocument>',
`<Key>${escapeForXml(errorDocument)}</Key>`,
'</ErrorDocument>');
}
if (redirectAllRequestsTo) {
xml.push('<RedirectAllRequestsTo>');
if (redirectAllRequestsTo.hostName) {
xml.push('<HostName>',
`${escapeForXml(redirectAllRequestsTo.hostName)}`,
'</HostName>');
}
if (redirectAllRequestsTo.protocol) {
xml.push('<Protocol>',
`${redirectAllRequestsTo.protocol}`,
'</Protocol>');
}
xml.push('</RedirectAllRequestsTo>');
}

@@ -11,22 +11,22 @@ function deleteUserBucketEntry(bucketName, canonicalID, log, cb) {
metadata.deleteObjectMD(usersBucket, keyForUserBucket, {}, log, error => {
// If the object representing the bucket is not in the
// users bucket just continue
- if (error && error.NoSuchKey) {
+ if (error?.is.NoSuchKey) {
return cb(null);
// BACKWARDS COMPATIBILITY: Remove this once no longer
// have old user bucket format
- } else if (error && error.NoSuchBucket) {
+ } else if (error?.is.NoSuchBucket) {
const keyForUserBucket2 = createKeyForUserBucket(canonicalID,
oldSplitter, bucketName);
return metadata.deleteObjectMD(oldUsersBucket, keyForUserBucket2,
{}, log, error => {
- if (error && !error.NoSuchKey) {
+ if (error && !error.is.NoSuchKey) {
log.error('from metadata while deleting user bucket',
{ error });
return cb(error);
}
log.trace('deleted bucket from user bucket',
{ method: '_deleteUserBucketEntry' });
return cb(null);
});
} else if (error) {
@@ -16,19 +16,19 @@ function invisiblyDelete(bucketName, canonicalID) {
return deleteUserBucketEntry(bucketName, canonicalID, log, err => {
if (err) {
log.error('error invisibly deleting bucket name from user bucket',
{ error: err });
return log.end();
}
log.trace('deleted bucket name from user bucket');
return metadata.deleteBucket(bucketName, log, error => {
log.trace('deleting bucket from metadata',
{ method: 'invisiblyDelete' });
if (error) {
log.error('error deleting bucket from metadata', { error });
return log.end();
}
log.trace('invisible deletion of bucket succeeded',
{ method: 'invisiblyDelete' });
return log.end();
});
});

@@ -51,7 +51,7 @@ class BackendInfo {
static isRequestEndpointPresent(requestEndpoint, log) {
if (Object.keys(config.restEndpoints).indexOf(requestEndpoint) < 0) {
log.trace('requestEndpoint does not match config restEndpoints',
{ requestEndpoint });
return false;
}
return true;

@@ -69,7 +69,7 @@ class BackendInfo {
.restEndpoints[requestEndpoint]) < 0) {
log.trace('the default locationConstraint for request' +
'Endpoint does not match any config locationConstraint',
{ requestEndpoint });
return false;
}
return true;

@@ -152,7 +152,7 @@ class BackendInfo {
return { isValid: true, legacyLocationConstraint };
}
if (!BackendInfo.isValidRequestEndpointOrBackend(requestEndpoint,
log)) {
return { isValid: false, description: 'Endpoint Location Error - ' +
`Your endpoint "${requestEndpoint}" is not in restEndpoints ` +
'in your config OR the default location constraint for request ' +
@@ -56,7 +56,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
});
},
function ifMultipleBackend(mpuBucket, mpuOverviewObj, destBucket,
next) {
if (config.backends.data === 'multiple') {
let location;
// if controlling location constraint is not stored in object

@@ -75,13 +75,13 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
location = mpuOverviewObj.controllingLocationConstraint;
}
return multipleBackendGateway.abortMPU(objectKey, uploadId,
location, bucketName, log, (err, skipDataDelete) => {
if (err) {
return next(err, destBucket);
}
return next(null, mpuBucket, destBucket,
skipDataDelete);
});
}
return next(null, mpuBucket, destBucket, false);
},

@@ -95,7 +95,7 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
});
},
function getPartLocations(mpuBucket, destBucket, skipDataDelete,
next) {
services.getMPUparts(mpuBucket.getName(), uploadId, log,
(err, result) => {
if (err) {

@@ -103,11 +103,11 @@ function abortMultipartUpload(authInfo, bucketName, objectKey, uploadId, log,
}
const storedParts = result.Contents;
return next(null, mpuBucket, storedParts, destBucket,
skipDataDelete);
});
},
function deleteData(mpuBucket, storedParts, destBucket,
skipDataDelete, next) {
// for Azure we do not need to delete data
if (skipDataDelete) {
return next(null, mpuBucket, storedParts, destBucket);
@ -100,7 +100,7 @@ function findCorsRule(rules, origin, method, headers) {
|
||||||
* @return {object} resHeaders - headers to include in response
|
* @return {object} resHeaders - headers to include in response
|
||||||
*/
|
*/
|
||||||
function generateCorsResHeaders(rule, origin, method, headers,
|
function generateCorsResHeaders(rule, origin, method, headers,
|
||||||
isPreflight) {
|
isPreflight) {
|
||||||
const resHeaders = {
|
const resHeaders = {
|
||||||
'access-control-max-age': rule.maxAgeSeconds,
|
'access-control-max-age': rule.maxAgeSeconds,
|
||||||
'access-control-allow-methods': rule.allowedMethods.join(', '),
|
'access-control-allow-methods': rule.allowedMethods.join(', '),
|
||||||
|
|
|
@ -84,8 +84,8 @@ function _storeInMDandDeleteData(bucketName, dataGetInfo, cipherBundle,
|
||||||
* result.versionId - unencrypted versionId returned by metadata
|
* result.versionId - unencrypted versionId returned by metadata
|
||||||
*/
|
*/
|
||||||
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
|
canonicalID, cipherBundle, request, isDeleteMarker, streamingV4Params,
|
||||||
log, callback) {
|
log, callback) {
|
||||||
const size = isDeleteMarker ? 0 : request.parsedContentLength;
|
const size = isDeleteMarker ? 0 : request.parsedContentLength;
|
||||||
// although the request method may actually be 'DELETE' if creating a
|
// although the request method may actually be 'DELETE' if creating a
|
||||||
// delete marker, for our purposes we consider this to be a 'PUT'
|
// delete marker, for our purposes we consider this to be a 'PUT'
|
||||||
|
@ -187,10 +187,10 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
|
|
||||||
if (isVersionedObj) {
|
if (isVersionedObj) {
|
||||||
log.debug(externalVersioningErrorMessage,
|
log.debug(externalVersioningErrorMessage,
|
||||||
{ method: 'createAndStoreObject', error: errors.NotImplemented });
|
{ method: 'createAndStoreObject', error: errors.NotImplemented });
|
||||||
return process.nextTick(() => {
|
return process.nextTick(() => {
|
||||||
callback(errors.NotImplemented.customizeDescription(
|
callback(errors.NotImplemented.customizeDescription(
|
||||||
externalVersioningErrorMessage));
|
externalVersioningErrorMessage));
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -208,7 +208,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
return next(null, null, null);
|
return next(null, null, null);
|
||||||
}
|
}
|
||||||
return dataStore(objectKeyContext, cipherBundle, request, size,
|
return dataStore(objectKeyContext, cipherBundle, request, size,
|
||||||
streamingV4Params, backendInfo, log, next);
|
streamingV4Params, backendInfo, log, next);
|
||||||
},
|
},
|
||||||
function processDataResult(dataGetInfo, calculatedHash, next) {
|
function processDataResult(dataGetInfo, calculatedHash, next) {
|
||||||
if (dataGetInfo === null || dataGetInfo === undefined) {
|
if (dataGetInfo === null || dataGetInfo === undefined) {
|
||||||
|
@ -220,8 +220,8 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
const { key, dataStoreName, dataStoreType, dataStoreETag,
|
const { key, dataStoreName, dataStoreType, dataStoreETag,
|
||||||
dataStoreVersionId } = dataGetInfo;
|
dataStoreVersionId } = dataGetInfo;
|
||||||
const prefixedDataStoreETag = dataStoreETag
|
const prefixedDataStoreETag = dataStoreETag
|
||||||
? `1:${dataStoreETag}`
|
? `1:${dataStoreETag}`
|
||||||
: `1:${calculatedHash}`;
|
: `1:${calculatedHash}`;
|
||||||
const dataGetInfoArr = [{ key, size, start: 0, dataStoreName,
|
const dataGetInfoArr = [{ key, size, start: 0, dataStoreName,
|
||||||
dataStoreType, dataStoreETag: prefixedDataStoreETag,
|
dataStoreType, dataStoreETag: prefixedDataStoreETag,
|
||||||
dataStoreVersionId }];
|
dataStoreVersionId }];
|
||||||
|
@ -239,7 +239,7 @@ function createAndStoreObject(bucketName, bucketMD, objectKey, objMD, authInfo,
|
||||||
if (err) {
|
if (err) {
|
||||||
// TODO: check AWS error when user requested a specific
|
// TODO: check AWS error when user requested a specific
|
||||||
// version before any versions have been put
|
// version before any versions have been put
|
||||||
const logLvl = err === errors.BadRequest ?
|
const logLvl = err.is.BadRequest ?
|
||||||
'debug' : 'error';
|
'debug' : 'error';
|
||||||
log[logLvl]('error getting versioning info', {
|
log[logLvl]('error getting versioning info', {
|
||||||
error: err,
|
error: err,
|
||||||
|
|
|
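Note: the hunk above switches the error check from strict equality against the arsenal error singletons (err === errors.BadRequest) to the per-type boolean that each arsenal error exposes (err.is.BadRequest). A minimal sketch of the pattern, assuming an arsenal version whose errors carry the "is" map; the handler name logVersioningError is illustrative only, not part of the change:

    const { errors } = require('arsenal');

    // illustrative handler: pick a log level based on the error type,
    // mirroring the change in the hunk above
    function logVersioningError(err, log) {
        const logLvl = err.is.BadRequest ? 'debug' : 'error';
        log[logLvl]('error getting versioning info', { error: err });
    }

The same migration (equality check replaced by the "is" flag) repeats in the hunks further down.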
@@ -80,7 +80,7 @@ function _generateExpHeadresMPU(rules, params, datetime) {
 const date = calculateDate(
 params.date,
 rule.DaysAfterInitiation,
-datetime
+datetime,
 );

 return {

@@ -24,7 +24,7 @@ function locationConstraintCheck(request, metaHeaders, bucket, log) {
 metaHeaders[constants.objectLocationConstraintHeader];
 } else {
 objectLocationConstraint = request
 .headers[constants.objectLocationConstraintHeader];
 }
 const bucketLocationConstraint = bucket.getLocationConstraint();
 const requestEndpoint = request.parsedHost;

@@ -35,7 +35,7 @@ function locationConstraintCheck(request, metaHeaders, bucket, log) {
 if (!controllingBackend.isValid) {
 backendInfoObj = {
 err: errors.InvalidArgument.customizeDescription(controllingBackend.
 description),
 };
 return backendInfoObj;
 }
@@ -0,0 +1,48 @@
+const { errors } = require('arsenal');
+
+const { config } = require('../../../Config');
+const { getLocationMetric, pushLocationMetric } =
+    require('../../../utapi/utilities');
+
+function _gbToBytes(gb) {
+    return gb * 1024 * 1024 * 1024;
+}
+
+/**
+ * locationStorageCheck - will ensure there is enough space left for object on
+ * PUT operations, or will update metric on DELETE
+ * NOTE: storage limit may not be exactly enforced in the case of concurrent
+ * requests when near limit
+ * @param {string} location - name of location to check quota
+ * @param {number} updateSize - new size to check against quota in bytes
+ * @param {object} log - werelogs logger
+ * @param {function} cb - callback function
+ * @return {undefined}
+ */
+function locationStorageCheck(location, updateSize, log, cb) {
+    const lc = config.locationConstraints;
+    const sizeLimitGB = lc[location] ? lc[location].sizeLimitGB : undefined;
+    if (updateSize === 0 || sizeLimitGB === undefined || sizeLimitGB === null) {
+        return cb();
+    }
+    // no need to list location metric, since it should be decreased
+    if (updateSize < 0) {
+        return pushLocationMetric(location, updateSize, log, cb);
+    }
+    return getLocationMetric(location, log, (err, bytesStored) => {
+        if (err) {
+            log.error(`Error listing metrics from Utapi: ${err.message}`);
+            return cb(err);
+        }
+        const newStorageSize = parseInt(bytesStored, 10) + updateSize;
+        const sizeLimitBytes = _gbToBytes(sizeLimitGB);
+        if (sizeLimitBytes < newStorageSize) {
+            return cb(errors.AccessDenied.customizeDescription(
+                `The assigned storage space limit for location ${location} ` +
+                'will be exceeded'));
+        }
+        return pushLocationMetric(location, updateSize, log, cb);
+    });
+}
+
+module.exports = locationStorageCheck;
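The added file above enforces a per-location storage quota: it reads the Utapi byte count for the location and compares it, plus the incoming size, against sizeLimitGB from the location configuration. A minimal usage sketch, assuming a caller that already knows the target location and the object size in bytes; the location name, contentLength, onQuotaChecked and the require path are placeholders, not part of the diff:

    const locationStorageCheck =
        require('./api/apiUtils/object/locationStorageCheck');  // path assumed

    // before storing data on a PUT: pass the object size as a positive number
    locationStorageCheck('us-east-1', contentLength, log, err => {
        if (err) {
            // AccessDenied when the location's sizeLimitGB would be exceeded
            return onQuotaChecked(err);
        }
        return onQuotaChecked();  // no limit configured or quota ok, proceed
    });

On deletes the same helper is called with a negative size, which only pushes the metric down without listing it first, as the comment in the file notes.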
@@ -43,7 +43,7 @@ function validateHeaders(bucket, headers, log) {
 !(objectLockMode && objectLockDate)) {
 return errors.InvalidArgument.customizeDescription(
 'x-amz-object-lock-retain-until-date and ' +
-'x-amz-object-lock-mode must both be supplied'
+'x-amz-object-lock-mode must both be supplied',
 );
 }
 const validModes = new Set(['GOVERNANCE', 'COMPLIANCE']);

@@ -112,7 +112,7 @@ function generateMpuPartStorageInfo(filteredPartList) {
 * and extraPartLocations
 */
 function validateAndFilterMpuParts(storedParts, jsonList, mpuOverviewKey,
 splitter, log) {
 let storedPartsCopy = [];
 const filteredPartsObj = {};
 filteredPartsObj.partList = [];

@@ -2,7 +2,7 @@ const { errors } = require('arsenal');
 const {
 parseRangeSpec,
 parseRange,
-} = require('arsenal/lib/network/http/utils');
+} = require('arsenal').network.http.utils;

 const constants = require('../../../../constants');
 const setPartRanges = require('./setPartRanges');

@@ -43,7 +43,7 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
 // md-model-version 2, need to handle cases where
 // objMD.location is just a string
 dataLocator = Array.isArray(sourceObjMD.location) ?
 sourceObjMD.location : [{ key: sourceObjMD.location }];
 }

 if (sourceObjMD['x-amz-server-side-encryption']) {

@@ -76,7 +76,7 @@ function setUpCopyLocator(sourceObjMD, rangeHeader, log) {
 log.trace('data model before version 2 so ' +
 'cannot support get range copy part');
 return { error: errors.NotImplemented
 .customizeDescription('Stored object ' +
 'has legacy data storage model so does' +
 ' not support range headers on copy part'),
 };

@@ -81,7 +81,7 @@ function dataStore(objectContext, cipherBundle, stream, size,
 dataRetrievalInfo,
 });
 return checkHashMatchMD5(stream, hashedStream,
 dataRetrievalInfo, log, cbOnce);
 });
 }

@@ -44,7 +44,7 @@ function getVersionIdResHeader(verCfg, objectMD) {
 return 'null';
 }
 return versionIdUtils.encode(objectMD.versionId,
 config.versionIdEncodingType);
 }
 return undefined;
 }

@@ -66,7 +66,7 @@ function _storeNullVersionMD(bucketName, objKey, objMD, options, log, cb) {
 metadata.putObjectMD(bucketName, objKey, objMD, options, log, err => {
 if (err) {
 log.debug('error from metadata storing null version as new version',
 { error: err });
 }
 cb(err, options);
 });

@@ -121,7 +121,7 @@ function _deleteNullVersionMD(bucketName, objKey, options, mst, log, cb) {
 err => {
 if (err) {
 log.warn('metadata error deleting null version',
 { error: err, method: '_deleteNullVersionMD' });
 return cb(err);
 }
 return cb(null, nullDataToDelete);
@@ -292,7 +292,7 @@ function versioningPreprocessing(bucketName, bucketMD, objectKey, objMD,
 // it's possible there was a concurrent request to
 // delete the null version, so proceed with putting a
 // new version
-if (err === errors.NoSuchKey) {
+if (err.is.NoSuchKey) {
 return next(null, options);
 }
 return next(errors.InternalError);

@@ -113,7 +113,7 @@ function processVersions(bucketName, listParams, list, encType) {
 xml.push(
 '<?xml version="1.0" encoding="UTF-8"?>',
 '<ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
-'<Name>', bucketName, '</Name>'
+'<Name>', bucketName, '</Name>',
 );
 const isTruncated = list.IsTruncated ? 'true' : 'false';
 const xmlParams = [

@@ -160,7 +160,7 @@ function processVersions(bucketName, listParams, list, encType) {
 `<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
 '</Owner>',
 `<StorageClass>${v.StorageClass}</StorageClass>`,
-v.IsDeleteMarker ? '</DeleteMarker>' : '</Version>'
+v.IsDeleteMarker ? '</DeleteMarker>' : '</Version>',
 );
 });
 list.CommonPrefixes.forEach(item => {

@@ -176,7 +176,7 @@ function processMasterVersions(bucketName, listParams, list) {
 xml.push(
 '<?xml version="1.0" encoding="UTF-8"?>',
 '<ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
-'<Name>', bucketName, '</Name>'
+'<Name>', bucketName, '</Name>',
 );
 const isTruncated = list.IsTruncated ? 'true' : 'false';
 const xmlParams = [

@@ -234,19 +234,19 @@ function processMasterVersions(bucketName, listParams, list) {
 `<Key>${objectKey}</Key>`,
 `<LastModified>${v.LastModified}</LastModified>`,
 `<ETag>"${v.ETag}"</ETag>`,
-`<Size>${v.Size}</Size>`
+`<Size>${v.Size}</Size>`,
 );
 if (!listParams.v2 || listParams.fetchOwner) {
 xml.push(
 '<Owner>',
 `<ID>${v.Owner.ID}</ID>`,
 `<DisplayName>${v.Owner.DisplayName}</DisplayName>`,
-'</Owner>'
+'</Owner>',
 );
 }
 return xml.push(
 `<StorageClass>${v.StorageClass}</StorageClass>`,
-'</Contents>'
+'</Contents>',
 );
 });
 list.CommonPrefixes.forEach(item => {

@@ -266,7 +266,7 @@ function handleResult(listParams, requestMaxKeys, encoding, authInfo,
 let res;
 if (listParams.listingType === 'DelimiterVersions') {
 res = processVersions(bucketName, listParams, list,
 config.versionIdEncodingType);
 } else {
 res = processMasterVersions(bucketName, listParams, list);
 }
@@ -364,14 +364,14 @@ function bucketGet(authInfo, request, log, callback) {
 bucketName, emptyList, corsHeaders, log, callback);
 }
 return services.getObjectListing(bucketName, listParams, log,
 (err, list) => {
 if (err) {
 log.debug('error processing request', { error: err });
 return callback(err, null, corsHeaders);
 }
 return handleResult(listParams, requestMaxKeys, encoding, authInfo,
 bucketName, list, corsHeaders, log, callback);
 });
 });
 return undefined;
 }

@@ -67,7 +67,7 @@ function bucketGetEncryption(authInfo, request, log, callback) {
 '</ApplyServerSideEncryptionByDefault>',
 '<BucketKeyEnabled>false</BucketKeyEnabled>',
 '</Rule>',
-'</ServerSideEncryptionConfiguration>'
+'</ServerSideEncryptionConfiguration>',
 );

 pushMetric('getBucketEncryption', log, {

@@ -46,10 +46,10 @@ function bucketGetLocation(authInfo, request, log, callback) {

 let locationConstraint = bucket.getLocationConstraint();
 if (!locationConstraint || locationConstraint === 'us-east-1') {
 // AWS returns empty string if no region has been
 // provided or for us-east-1
 // Note: AWS JS SDK sends a request with locationConstraint us-east-1
 // if no locationConstraint provided.
 locationConstraint = '';
 }
 const xml = `<?xml version="1.0" encoding="UTF-8"?>

@@ -19,8 +19,8 @@ function convertToXml(versioningConfiguration) {
 const xml = [];

 xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
 '<VersioningConfiguration ' +
-'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">'
+'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
 );

 if (versioningConfiguration && versioningConfiguration.Status) {

@@ -50,9 +50,9 @@ function checkLocationConstraint(request, locationConstraint, log) {
 `${locationConstraintChecked} - is not listed in the ` +
 'locationConstraint config';
 log.trace(`locationConstraint is invalid - ${errMsg}`,
 { locationConstraint: locationConstraintChecked });
 return { error: errors.InvalidLocationConstraint.
 customizeDescription(errMsg) };
 }
 return { error: null, locationConstraint: locationConstraintChecked };
 }

@@ -80,7 +80,7 @@ function _parseXML(request, log, cb) {
 log.trace('location constraint',
 { locationConstraint });
 const locationCheck = checkLocationConstraint(request,
 locationConstraint, log);
 if (locationCheck.error) {
 return cb(locationCheck.error);
 }

@@ -89,7 +89,7 @@ function _parseXML(request, log, cb) {
 }
 return process.nextTick(() => {
 const locationCheck = checkLocationConstraint(request,
 undefined, log);
 if (locationCheck.error) {
 return cb(locationCheck.error);
 }
@@ -167,20 +167,20 @@ function bucketPut(authInfo, request, log, callback) {
 return next(null, locationConstraint);
 },
 (locationConstraint, next) => createBucket(authInfo, bucketName,
 request.headers, locationConstraint, log, (err, previousBucket) => {
 // if bucket already existed, gather any relevant cors
 // headers
 const corsHeaders = collectCorsHeaders(
 request.headers.origin, request.method, previousBucket);
 if (err) {
 return next(err, corsHeaders);
 }
 pushMetric('createBucket', log, {
 authInfo,
 bucket: bucketName,
 });
 return next(null, corsHeaders);
 }),
 ], callback);
 }

@@ -103,16 +103,16 @@ function bucketPutACL(authInfo, request, log, callback) {
 return async.waterfall([
 function waterfall1(next) {
 metadataValidateBucket(metadataValParams, log,
 (err, bucket) => {
 if (err) {
 log.trace('request authorization failed', {
 error: err,
 method: 'metadataValidateBucket',
 });
 return next(err, bucket);
 }
 return next(null, bucket);
 });
 },
 function waterfall2(bucket, next) {
 // If not setting acl through headers, parse body

@@ -179,7 +179,7 @@ function bucketPutACL(authInfo, request, log, callback) {
 if (!skip && granteeType === 'Group') {
 if (possibleGroups.indexOf(grantee.URI[0]) < 0) {
 log.trace('invalid user group',
 { userGroup: grantee.URI[0] });
 return next(errors.InvalidArgument, bucket);
 }
 return usersIdentifiedByGroup.push({

@@ -195,15 +195,15 @@ function bucketPutACL(authInfo, request, log, callback) {
 // through the access headers
 const allGrantHeaders =
 [].concat(grantReadHeader, grantWriteHeader,
 grantReadACPHeader, grantWriteACPHeader,
 grantFullControlHeader);

 usersIdentifiedByEmail = allGrantHeaders.filter(item =>
 item && item.userIDType.toLowerCase() === 'emailaddress');

 usersIdentifiedByGroup = allGrantHeaders
 .filter(itm => itm && itm.userIDType
 .toLowerCase() === 'uri');
 for (let i = 0; i < usersIdentifiedByGroup.length; i++) {
 const userGroup = usersIdentifiedByGroup[i].identifier;
 if (possibleGroups.indexOf(userGroup) < 0) {

@@ -27,7 +27,7 @@ function bucketPutCors(authInfo, request, log, callback) {

 if (!request.post) {
 log.debug('CORS xml body is missing',
 { error: errors.MissingRequestBodyError });
 return callback(errors.MissingRequestBodyError);
 }

@@ -105,7 +105,7 @@ function bucketPutVersioning(authInfo, request, log, callback) {
 if (!_checkBackendVersioningImplemented(bucket)) {
 log.debug(externalVersioningErrorMessage,
 { method: 'bucketPutVersioning',
 error: errors.NotImplemented });
 const error = errors.NotImplemented.customizeDescription(
 externalVersioningErrorMessage);
 return next(error, bucket);
@@ -50,7 +50,7 @@ const REPLICATION_ACTION = 'MPU';
 */


 /*
 Format of xml response:
 <?xml version='1.0' encoding='UTF-8'?>
 <CompleteMultipartUploadResult

@@ -137,7 +137,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
 });
 },
 function parsePartsList(destBucket, objMD, mpuBucket,
 storedMetadata, next) {
 const location = storedMetadata.controllingLocationConstraint;
 // BACKWARD: Remove to remove the old splitter
 if (mpuBucket.getMdBucketModelVersion() < 2) {

@@ -159,7 +159,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
 return next(errors.MalformedXML, destBucket);
 },
 function markOverviewForCompletion(destBucket, objMD, mpuBucket, jsonList,
 storedMetadata, location, mpuOverviewKey, next) {
 return services.metadataMarkMPObjectForCompletion({
 bucketName: mpuBucket.getName(),
 objectKey,

@@ -171,11 +171,11 @@ function completeMultipartUpload(authInfo, request, log, callback) {
 return next(err);
 }
 return next(null, destBucket, objMD, mpuBucket,
 jsonList, storedMetadata, location, mpuOverviewKey);
 });
 },
 function retrieveParts(destBucket, objMD, mpuBucket, jsonList,
 storedMetadata, location, mpuOverviewKey, next) {
 return services.getMPUparts(mpuBucket.getName(), uploadId, log,
 (err, result) => {
 if (err) {

@@ -187,7 +187,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
 });
 },
 function ifMultipleBackend(destBucket, objMD, mpuBucket, storedParts,
 jsonList, storedMetadata, location, mpuOverviewKey, next) {
 if (config.backends.data === 'multiple') {
 // if mpu was initiated in legacy version
 if (location === undefined) {

@@ -203,26 +203,26 @@ function completeMultipartUpload(authInfo, request, log, callback) {
 }
 const mdInfo = { storedParts, mpuOverviewKey, splitter };
 return multipleBackendGateway.completeMPU(objectKey,
 uploadId, location, jsonList, mdInfo, bucketName, null, null,
 log, (err, completeObjData) => {
 if (err) {
 return next(err, destBucket);
 }
 return next(null, destBucket, objMD, mpuBucket, storedParts,
 jsonList, storedMetadata, completeObjData,
 mpuOverviewKey);
 });
 }
 return next(null, destBucket, objMD, mpuBucket, storedParts,
 jsonList, storedMetadata, null, mpuOverviewKey);
 },
 function validateAndFilterParts(destBucket, objMD, mpuBucket,
 storedParts, jsonList, storedMetadata, completeObjData, mpuOverviewKey,
 next) {
 if (completeObjData) {
 return next(null, destBucket, objMD, mpuBucket, storedParts,
 jsonList, storedMetadata, completeObjData, mpuOverviewKey,
 completeObjData.filteredPartsObj);
 }
 const filteredPartsObj = validateAndFilterMpuParts(storedParts,
 jsonList, mpuOverviewKey, splitter, log);

@@ -234,8 +234,8 @@ function completeMultipartUpload(authInfo, request, log, callback) {
 filteredPartsObj);
 },
 function processParts(destBucket, objMD, mpuBucket, storedParts,
 jsonList, storedMetadata, completeObjData, mpuOverviewKey,
 filteredPartsObj, next) {
 // if mpu was completed on backend that stored mpu MD externally,
 // skip MD processing steps
 if (completeObjData && skipMpuPartProcessing(completeObjData)) {

@@ -341,7 +341,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
 if (err) {
 // TODO: check AWS error when user requested a specific
 // version before any versions have been put
-const logLvl = err === errors.BadRequest ?
+const logLvl = err.is.BadRequest ?
 'debug' : 'error';
 log[logLvl]('error getting versioning info', {
 error: err,

@@ -428,10 +428,10 @@ function completeMultipartUpload(authInfo, request, log, callback) {
 if (dataToDelete) {
 const newDataStoreName =
 Array.isArray(dataLocations) && dataLocations[0] ?
 dataLocations[0].dataStoreName : null;
 const delLog =
 logger.newRequestLoggerFromSerializedUids(log
 .getSerializedUids());
 return data.batchDelete(dataToDelete,
 request.method,
 newDataStoreName, delLog, err => {

@@ -481,7 +481,7 @@ function completeMultipartUpload(authInfo, request, log, callback) {
 if (generatedVersionId) {
 corsHeaders['x-amz-version-id'] =
 versionIdUtils.encode(generatedVersionId,
 config.versionIdEncodingType);
 }
 Object.assign(responseHeaders, corsHeaders);
@@ -132,7 +132,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
 }

 function _getMPUBucket(destinationBucket, log, corsHeaders,
 uploadId, cipherBundle, locConstraint, callback) {
 const xmlParams = {
 bucketName,
 objectKey,

@@ -228,45 +228,45 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
 let uploadId;
 if (config.backends.data === 'multiple') {
 return multipleBackendGateway.createMPU(objectKey, metaHeaders,
 bucketName, websiteRedirectHeader, locConstraint, undefined,
 undefined, undefined, undefined, tagging, log,
 (err, dataBackendResObj) => {
 if (err) {
 return callback(err);
 }
 if (locConstraint &&
 config.locationConstraints[locConstraint] &&
 config.locationConstraints[locConstraint].type &&
 constants.versioningNotImplBackends[config
 .locationConstraints[locConstraint].type]
 ) {
 const vcfg = destinationBucket.getVersioningConfiguration();
 const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
 if (isVersionedObj) {
 log.debug(externalVersioningErrorMessage,
 { method: 'initiateMultipartUpload',
 error: errors.NotImplemented });
 return callback(errors.NotImplemented
 .customizeDescription(externalVersioningErrorMessage));
+}
 }
-}
 if (dataBackendResObj) {
 // dataBackendResObj will be returned in data backend
 // handles mpu
 uploadId = dataBackendResObj.UploadId;
 } else {
 uploadId = uuidv4().replace(/-/g, '');
 }

 return _getMPUBucket(destinationBucket, log, corsHeaders,
 uploadId, cipherBundle, locConstraint, callback);
 });
 }
 // Generate uniqueID without dashes so that routing not messed up
 uploadId = uuidv4().replace(/-/g, '');

 return _getMPUBucket(destinationBucket, log, corsHeaders,
 uploadId, cipherBundle, locConstraint, callback);
 }

 async.waterfall([

@@ -291,24 +291,24 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
 if (destinationBucket.hasTransientFlag() || destinationBucket.hasDeletedFlag()) {
 log.trace('transient or deleted flag so cleaning up bucket');
 return cleanUpBucket(
 destinationBucket,
 accountCanonicalID,
 log,
 error => {
 if (error) {
 log.debug('error cleaning up bucket with flag',
 {
 error,
 transientFlag: destinationBucket.hasTransientFlag(),
 deletedFlag: destinationBucket.hasDeletedFlag(),
 });
 // To avoid confusing user with error
 // from cleaning up
 // bucket return InternalError
 return next(errors.InternalError, corsHeaders);
 }
 return next(null, corsHeaders, destinationBucket);
 });
 }
 return next(null, corsHeaders, destinationBucket);
 },

@@ -326,15 +326,15 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
 return next(error, corsHeaders);
 }
 return next(null, corsHeaders, destinationBucket, objectSSEConfig);
-}
+},
 ),
 ],
 (error, corsHeaders, destinationBucket, objectSSEConfig) => {
 if (error) {
 return callback(error, null, corsHeaders);
-}
-return _storetheMPObject(destinationBucket, corsHeaders, objectSSEConfig);
 }
+return _storetheMPObject(destinationBucket, corsHeaders, objectSSEConfig);
+},
 );
 return undefined;
 }
@@ -90,7 +90,7 @@ function listParts(authInfo, request, log, callback) {
 }
 const partNumberMarker =
 Number.parseInt(request.query['part-number-marker'], 10) ?
 Number.parseInt(request.query['part-number-marker'], 10) : 0;
 const metadataValMPUparams = {
 authInfo,
 bucketName,

@@ -160,22 +160,22 @@ function listParts(authInfo, request, log, callback) {
 location = mpuOverviewObj.controllingLocationConstraint;
 }
 return multipleBackendGateway.listParts(objectKey, uploadId,
 location, bucketName, partNumberMarker, maxParts, log,
 (err, backendPartList) => {
 if (err) {
 return next(err, destBucket);
 } else if (backendPartList) {
 return next(null, destBucket, mpuBucket,
 mpuOverviewObj, backendPartList);
 }
 return next(null, destBucket, mpuBucket, mpuOverviewObj,
 null);
 });
 }
 return next(null, destBucket, mpuBucket, mpuOverviewObj, null);
 },
 function waterfall4(destBucket, mpuBucket, mpuOverviewObj,
 backendPartList, next) {
 // if parts were returned from cloud backend, they were not
 // stored in Scality S3 metadata, so this step can be skipped
 if (backendPartList) {

@@ -195,13 +195,13 @@ function listParts(authInfo, request, log, callback) {
 splitter,
 };
 return services.getSomeMPUparts(getPartsParams,
 (err, storedParts) => {
 if (err) {
 return next(err, destBucket, null);
 }
 return next(null, destBucket, mpuBucket, storedParts,
 mpuOverviewObj);
 });
 }, function waterfall5(destBucket, mpuBucket, storedParts,
 mpuOverviewObj, next) {
 const encodingFn = encoding === 'url'

@@ -245,7 +245,7 @@ function listParts(authInfo, request, log, callback) {
 xml.push(
 '<?xml version="1.0" encoding="UTF-8"?>',
 '<ListPartsResult xmlns="http://s3.amazonaws.com/doc/' +
-'2006-03-01/">'
+'2006-03-01/">',
 );
 buildXML([
 { tag: 'Bucket', value: bucketName },
@@ -40,7 +40,7 @@ const versionIdUtils = versioning.VersionID;
 */


 /*
 Format of xml response:
 <?xml version="1.0" encoding="UTF-8"?>
 <DeleteResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">

@@ -72,19 +72,19 @@ function _formatXML(quietSetting, errorResults, deleted) {
 let errorXML = [];
 errorResults.forEach(errorObj => {
 errorXML.push(
 '<Error>',
 '<Key>', escapeForXml(errorObj.entry.key), '</Key>',
 '<Code>', escapeForXml(errorObj.error.message), '</Code>');
 if (errorObj.entry.versionId) {
 const version = errorObj.entry.versionId === 'null' ?
 'null' : escapeForXml(errorObj.entry.versionId);
 errorXML.push('<VersionId>', version, '</VersionId>');
 }
 errorXML.push(
 '<Message>',
 escapeForXml(errorObj.error.description),
 '</Message>',
-'</Error>'
+'</Error>',
 );
 });
 errorXML = errorXML.join('');

@@ -110,13 +110,13 @@ function _formatXML(quietSetting, errorResults, deleted) {
 '<Deleted>',
 '<Key>',
 escapeForXml(version.entry.key),
-'</Key>'
+'</Key>',
 );
 if (version.entry.versionId) {
 deletedXML.push(
 '<VersionId>',
 escapeForXml(version.entry.versionId),
-'</VersionId>'
+'</VersionId>',
 );
 }
 if (isDeleteMarker) {

@@ -126,7 +126,7 @@ function _formatXML(quietSetting, errorResults, deleted) {
 '</DeleteMarker>',
 '<DeleteMarkerVersionId>',
 deleteMarkerVersionId,
-'</DeleteMarkerVersionId>'
+'</DeleteMarkerVersionId>',
 );
 }
 deletedXML.push('</Deleted>');

@@ -183,7 +183,7 @@ function _parseXml(xmlToParse, next) {
 * successfullyDeleted, totalContentLengthDeleted)
 */
 function getObjMetadataAndDelete(authInfo, canonicalID, request,
 bucketName, bucket, quietSetting, errorResults, inPlay, log, next) {
 const successfullyDeleted = [];
 let totalContentLengthDeleted = 0;
 let numOfObjectsRemoved = 0;

@@ -210,10 +210,10 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
 (versionId, callback) => metadataGetObject(bucketName, entry.key,
 versionId, log, (err, objMD) => {
 // if general error from metadata return error
-if (err && !err.NoSuchKey) {
+if (err && !err.is.NoSuchKey) {
 return callback(err);
 }
-if (err && err.NoSuchKey) {
+if (err?.is.NoSuchKey) {
 const verCfg = bucket.getVersioningConfiguration();
 // To adhere to AWS behavior, create a delete marker
 // if trying to delete an object that does not exist
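The hunk above uses two spellings of the same guard: err && !err.is.NoSuchKey and the optional-chaining form err?.is.NoSuchKey. Both behave the same when no error was returned; the latter needs an ES2020-capable parser. A small sketch of the two forms, assuming arsenal errors that carry the "is" map (the function names are placeholders):

    // explicit null check versus ES2020 optional chaining
    function isNoSuchKey(err) {
        return Boolean(err && err.is.NoSuchKey);
    }
    function isNoSuchKeyOptional(err) {
        return Boolean(err?.is.NoSuchKey);
    }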
@@ -237,22 +237,22 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
 }),
 (objMD, versionId, callback) =>
 preprocessingVersioningDelete(bucketName, bucket, objMD,
 versionId, log, (err, options) => callback(err, options,
 objMD)),
 (options, objMD, callback) => {
 const deleteInfo = {};
 if (options && options.deleteData) {
 deleteInfo.deleted = true;
 return services.deleteObject(bucketName, objMD,
 entry.key, options, log, err =>
 callback(err, objMD, deleteInfo));
 }
 deleteInfo.newDeleteMarker = true;
 // This call will create a delete-marker
 return createAndStoreObject(bucketName, bucket, entry.key,
 objMD, authInfo, canonicalID, null, request,
 deleteInfo.newDeleteMarker, null, log, (err, result) =>
 callback(err, objMD, deleteInfo, result.versionId));
 },
 ], (err, objMD, deleteInfo, versionId) => {
 if (err === skipError) {

@@ -386,7 +386,7 @@ function multiObjectDelete(authInfo, request, log, callback) {
 return vault.checkPolicies(requestContextParams, authInfo.getArn(),
 log, (err, authorizationResults) => {
 // there were no policies so received a blanket AccessDenied
-if (err && err.AccessDenied) {
+if (err?.is.AccessDenied) {
 objects.forEach(entry => {
 errorResults.push({
 entry,

@@ -482,8 +482,8 @@ function multiObjectDelete(authInfo, request, log, callback) {
 function getObjMetadataAndDeleteStep(quietSetting, errorResults, inPlay,
 bucket, next) {
 return getObjMetadataAndDelete(authInfo, canonicalID, request,
 bucketName, bucket, quietSetting, errorResults, inPlay,
 log, next);
 },
 ], (err, quietSetting, errorResults, numOfObjectsRemoved,
 successfullyDeleted, totalContentLengthDeleted, bucket) => {
@@ -1,5 +1,3 @@
-const { errors } = require('arsenal');
-
 const abortMultipartUpload = require('./apiUtils/object/abortMultipartUpload');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const isLegacyAWSBehavior = require('../utilities/legacyAWSBehavior');

@@ -29,10 +27,10 @@ function multipartDelete(authInfo, request, log, callback) {
 request.method, destinationBucket);
 const location = destinationBucket ?
 destinationBucket.getLocationConstraint() : null;
-if (err && err !== errors.NoSuchUpload) {
+if (err && !err.is.NoSuchUpload) {
 return callback(err, corsHeaders);
 }
-if (err === errors.NoSuchUpload && isLegacyAWSBehavior(location)) {
+if (err?.is.NoSuchUpload && isLegacyAWSBehavior(location)) {
 log.trace('did not find valid mpu with uploadId', {
 method: 'multipartDelete',
 uploadId,
@@ -60,7 +60,7 @@ function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination,
 whichTagging = whichTagging === undefined ? 'COPY' : whichTagging;
 if (whichTagging !== 'COPY' && whichTagging !== 'REPLACE') {
 return { error: errors.InvalidArgument
 .customizeDescription('Unknown tagging directive') };
 }
 const overrideMetadata = {};
 if (headers['x-amz-server-side-encryption']) {

@@ -185,7 +185,7 @@ function _prepMetadata(request, sourceObjMD, headers, sourceIsDestination,
 storeMetadataParams.contentType = sourceObjMD['content-type'];
 }
 return { storeMetadataParams, sourceLocationConstraintName,
 backendInfoDest: backendInfoObjDest.backendInfo };
 }

 /**

@@ -249,7 +249,7 @@ function objectCopy(authInfo, request, sourceBucket,
 (err, destBucketMD, destObjMD) => {
 if (err) {
 log.debug('error validating put part of request',
 { error: err });
 return next(err, destBucketMD);
 }
 const flag = destBucketMD.hasDeletedFlag()

@@ -267,7 +267,7 @@ function objectCopy(authInfo, request, sourceBucket,
 (err, sourceBucketMD, sourceObjMD) => {
 if (err) {
 log.debug('error validating get part of request',
 { error: err });
 return next(err, null, destBucketMD);
 }
 if (!sourceObjMD) {

@@ -278,10 +278,10 @@ function objectCopy(authInfo, request, sourceBucket,
 }
 if (sourceObjMD.isDeleteMarker) {
 log.debug('delete marker on source object',
 { sourceObject });
 if (sourceVersionId) {
 const err = errors.InvalidRequest
 .customizeDescription('The source of a copy ' +
 'request may not specifically refer to a delete' +
 'marker by version id.');
 return next(err, destBucketMD);

@@ -293,13 +293,13 @@ function objectCopy(authInfo, request, sourceBucket,
 }
 const headerValResult =
 validateHeaders(request.headers,
 sourceObjMD['last-modified'],
 sourceObjMD['content-md5']);
 if (headerValResult.error) {
 return next(errors.PreconditionFailed, destBucketMD);
 }
 const { storeMetadataParams, error: metadataError,
 sourceLocationConstraintName, backendInfoDest } =
 _prepMetadata(request, sourceObjMD, request.headers,
 sourceIsDestination, authInfo, destObjectKey,
 sourceBucketMD, destBucketMD, sourceVersionId, log);

@@ -319,7 +319,7 @@ function objectCopy(authInfo, request, sourceBucket,
 // md-model-version 2, need to handle cases where
 // objMD.location is just a string
 dataLocator = Array.isArray(sourceObjMD.location) ?
 sourceObjMD.location : [{ key: sourceObjMD.location }];
 }

 if (sourceObjMD['x-amz-server-side-encryption']) {

@@ -339,13 +339,13 @@ function objectCopy(authInfo, request, sourceBucket,
 destBucketMD, destObjMD, sourceLocationConstraintName,
 backendInfoDest, next) {
 getObjectSSEConfiguration(
 request.headers,
 destBucketMD,
 log,
 (err, sseConfig) =>
 next(err, storeMetadataParams, dataLocator, sourceBucketMD,
 destBucketMD, destObjMD, sourceLocationConstraintName,
 backendInfoDest, sseConfig));
 },
 function goGetData(storeMetadataParams, dataLocator, sourceBucketMD,
 destBucketMD, destObjMD, sourceLocationConstraintName,

@@ -380,7 +380,7 @@ function objectCopy(authInfo, request, sourceBucket,
 { method: 'multipleBackendGateway',
 error: errors.NotImplemented });
 return next(errors.NotImplemented.customizeDescription(
 externalVersioningErrorMessage), destBucketMD);
 }
 if (dataLocator.length === 0) {
 if (!storeMetadataParams.locationMatch &&

@@ -409,15 +409,15 @@ function objectCopy(authInfo, request, sourceBucket,
 serverSideEncryption, destBucketMD);
 }
 return data.copyObject(request, sourceLocationConstraintName,
 storeMetadataParams, dataLocator, dataStoreContext,
 backendInfoDest, sourceBucketMD, destBucketMD, serverSideEncryption, log,
|
backendInfoDest, sourceBucketMD, destBucketMD, serverSideEncryption, log,
|
||||||
(err, results) => {
|
(err, results) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
return next(err, destBucketMD);
|
return next(err, destBucketMD);
|
||||||
}
|
}
|
||||||
return next(null, storeMetadataParams, results,
|
return next(null, storeMetadataParams, results,
|
||||||
destObjMD, serverSideEncryption, destBucketMD);
|
destObjMD, serverSideEncryption, destBucketMD);
|
||||||
});
|
});
|
||||||
},
|
},
|
||||||
function getVersioningInfo(storeMetadataParams, destDataGetInfoArr,
|
function getVersioningInfo(storeMetadataParams, destDataGetInfoArr,
|
||||||
destObjMD, serverSideEncryption, destBucketMD, next) {
|
destObjMD, serverSideEncryption, destBucketMD, next) {
|
||||||
|
@ -426,7 +426,7 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
(err, options) => {
|
(err, options) => {
|
||||||
if (err) {
|
if (err) {
|
||||||
log.debug('error processing versioning info',
|
log.debug('error processing versioning info',
|
||||||
{ error: err });
|
{ error: err });
|
||||||
return next(err, null, destBucketMD);
|
return next(err, null, destBucketMD);
|
||||||
}
|
}
|
||||||
// eslint-disable-next-line
|
// eslint-disable-next-line
|
||||||
|
@ -494,8 +494,8 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
{ error: err });
|
{ error: err });
|
||||||
}
|
}
|
||||||
next(null,
|
next(null,
|
||||||
storingNewMdResult, destBucketMD, storeMetadataParams,
|
storingNewMdResult, destBucketMD, storeMetadataParams,
|
||||||
serverSideEncryption, sourceObjSize, destObjPrevSize);
|
serverSideEncryption, sourceObjSize, destObjPrevSize);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
return next(null,
|
return next(null,
|
||||||
|
@ -524,20 +524,20 @@ function objectCopy(authInfo, request, sourceBucket,
|
||||||
serverSideEncryption.algorithm;
|
serverSideEncryption.algorithm;
|
||||||
if (serverSideEncryption.algorithm === 'aws:kms') {
|
if (serverSideEncryption.algorithm === 'aws:kms') {
|
||||||
additionalHeaders[
|
additionalHeaders[
|
||||||
'x-amz-server-side-encryption-aws-kms-key-id'] =
|
'x-amz-server-side-encryption-aws-kms-key-id'] =
|
||||||
serverSideEncryption.masterKeyId;
|
serverSideEncryption.masterKeyId;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (sourceVersionId) {
|
if (sourceVersionId) {
|
||||||
additionalHeaders['x-amz-copy-source-version-id'] =
|
additionalHeaders['x-amz-copy-source-version-id'] =
|
||||||
versionIdUtils.encode(sourceVersionId,
|
versionIdUtils.encode(sourceVersionId,
|
||||||
config.versionIdEncodingType);
|
config.versionIdEncodingType);
|
||||||
}
|
}
|
||||||
const isVersioned = storingNewMdResult && storingNewMdResult.versionId;
|
const isVersioned = storingNewMdResult && storingNewMdResult.versionId;
|
||||||
if (isVersioned) {
|
if (isVersioned) {
|
||||||
additionalHeaders['x-amz-version-id'] =
|
additionalHeaders['x-amz-version-id'] =
|
||||||
versionIdUtils.encode(storingNewMdResult.versionId,
|
versionIdUtils.encode(storingNewMdResult.versionId,
|
||||||
config.versionIdEncodingType);
|
config.versionIdEncodingType);
|
||||||
}
|
}
|
||||||
|
|
||||||
Object.assign(responseHeaders, additionalHeaders);
|
Object.assign(responseHeaders, additionalHeaders);
|
||||||
|
|
|
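The hunk above ends by folding `x-amz-copy-source-version-id` and `x-amz-version-id` into `additionalHeaders` before merging them into the response. A minimal sketch of that header assembly, with `encodeVersionId` as a hypothetical stand-in for `versionIdUtils.encode(id, config.versionIdEncodingType)`:

    // Sketch: build the copy-response version headers.
    // encodeVersionId is a hypothetical stand-in for versionIdUtils.encode.
    function buildVersionHeaders(sourceVersionId, storingNewMdResult, encodeVersionId) {
        const additionalHeaders = {};
        if (sourceVersionId) {
            // echo back which source version was copied
            additionalHeaders['x-amz-copy-source-version-id'] =
                encodeVersionId(sourceVersionId);
        }
        const isVersioned = storingNewMdResult && storingNewMdResult.versionId;
        if (isVersioned) {
            // advertise the version id assigned to the new copy
            additionalHeaders['x-amz-version-id'] =
                encodeVersionId(storingNewMdResult.versionId);
        }
        return additionalHeaders;
    }

    // usage: Object.assign(responseHeaders, buildVersionHeaders(srcVid, result, id => id));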
@@ -55,49 +55,49 @@ function objectDelete(authInfo, request, log, cb) {
    return async.waterfall([
        function validateBucketAndObj(next) {
            return metadataValidateBucketAndObj(valParams, log,
                (err, bucketMD, objMD) => {
                    if (err) {
                        return next(err, bucketMD);
                    }

                    const versioningCfg = bucketMD.getVersioningConfiguration();
                    if (!objMD) {
                        if (!versioningCfg) {
                            return next(errors.NoSuchKey, bucketMD);
                        }
                        // AWS does not return an error when trying to delete a
                        // specific version that does not exist. We skip to the end
                        // of the waterfall here.
                        if (reqVersionId) {
                            log.debug('trying to delete specific version ' +
                                ' that does not exist');
                            return next(errors.NoSuchVersion, bucketMD);
                        }
                        // To adhere to AWS behavior, create a delete marker even
                        // if trying to delete an object that does not exist when
                        // versioning has been configured
                        return next(null, bucketMD, objMD);
                    }
                    // AWS only returns an object lock error if a version id
                    // is specified, else continue to create a delete marker
                    if (reqVersionId &&
                        isObjectLocked(bucketMD, objMD, request.headers)) {
                        log.debug('trying to delete locked object');
                        return next(objectLockedError, bucketMD);
                    }
                    if (reqVersionId && objMD.location &&
                        Array.isArray(objMD.location) && objMD.location[0]) {
                        // we need this information for data deletes to AWS
                        // eslint-disable-next-line no-param-reassign
                        objMD.location[0].deleteVersion = true;
                    }
                    if (objMD['content-length'] !== undefined) {
                        log.end().addDefaultFields({
                            bytesDeleted: objMD['content-length'],
                        });
                    }
                    return next(null, bucketMD, objMD);
                });
        },
        function getVersioningInfo(bucketMD, objectMD, next) {
            return preprocessingVersioningDelete(bucketName,

@@ -105,7 +105,7 @@ function objectDelete(authInfo, request, log, cb) {
                (err, options) => {
                    if (err) {
                        log.error('err processing versioning info',
                            { error: err });
                        return next(err, bucketMD);
                    }
                    return next(null, bucketMD, objectMD, options);

@@ -124,14 +124,14 @@ function objectDelete(authInfo, request, log, cb) {
                }
                return services.deleteObject(bucketName, objectMD, objectKey,
                    delOptions, log, (err, delResult) => next(err, bucketMD,
                        objectMD, delResult, deleteInfo));
            }
            // putting a new delete marker
            deleteInfo.newDeleteMarker = true;
            return createAndStoreObject(bucketName, bucketMD,
                objectKey, objectMD, authInfo, canonicalID, null, request,
                deleteInfo.newDeleteMarker, null, log, (err, newDelMarkerRes) =>
                    next(err, bucketMD, objectMD, newDelMarkerRes, deleteInfo));
        },
    ], (err, bucketMD, objectMD, result, deleteInfo) => {
        const resHeaders = collectCorsHeaders(request.headers.origin,
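The validateBucketAndObj step above encodes AWS's delete semantics for versioned buckets: a missing key without versioning is NoSuchKey, a missing specific version is NoSuchVersion, an absent key in a versioned bucket still gets a delete marker, and object-lock errors only apply when a version id is supplied. A self-contained restatement of that branch logic (plain string error names stand in for arsenal errors, and a boolean stands in for isObjectLocked()):

    // Sketch of the decision made in validateBucketAndObj above.
    function deleteDecision({ objMD, versioningCfg, reqVersionId, isLocked }) {
        if (!objMD) {
            if (!versioningCfg) {
                return { error: 'NoSuchKey' };
            }
            if (reqVersionId) {
                // deleting a specific version that does not exist
                return { error: 'NoSuchVersion' };
            }
            // versioned bucket: still create a delete marker
            return { action: 'createDeleteMarker' };
        }
        if (reqVersionId && isLocked) {
            // object lock only blocks version-specific deletes
            return { error: 'ObjectLocked' };
        }
        return { action: 'proceed' };
    }

    console.log(deleteDecision({ objMD: null, versioningCfg: { Status: 'Enabled' } }));
    // -> { action: 'createDeleteMarker' }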
@@ -48,26 +48,26 @@ function objectDeleteTagging(authInfo, request, log, callback) {

    return async.waterfall([
        next => metadataValidateBucketAndObj(metadataValParams, log,
            (err, bucket, objectMD) => {
                if (err) {
                    log.trace('request authorization failed',
                        { method: 'objectDeleteTagging', error: err });
                    return next(err);
                }
                if (!objectMD) {
                    const err = reqVersionId ? errors.NoSuchVersion :
                        errors.NoSuchKey;
                    log.trace('error no object metadata found',
                        { method: 'objectDeleteTagging', error: err });
                    return next(err, bucket);
                }
                if (objectMD.isDeleteMarker) {
                    log.trace('version is a delete marker',
                        { method: 'objectDeleteTagging' });
                    return next(errors.MethodNotAllowed, bucket);
                }
                return next(null, bucket, objectMD);
            }),
        (bucket, objectMD, next) => {
            // eslint-disable-next-line no-param-reassign
            objectMD.tags = {};

@@ -81,13 +81,13 @@ function objectDeleteTagging(authInfo, request, log, callback) {
                    objectMD.replicationInfo, replicationInfo);
            }
            metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params,
                log, err =>
                    next(err, bucket, objectMD));
        },
        (bucket, objectMD, next) => {
            if (config.backends.data === 'multiple') {
                return multipleBackendGateway.objectTagging('Delete', objectKey,
                    bucket, objectMD, log, err => next(err, bucket, objectMD));
            }
            return next(null, bucket, objectMD);
        },
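The tagging-delete handler above chains three steps with async.waterfall: validate the bucket and object, blank out objectMD.tags and persist the metadata, then optionally propagate the delete to an external backend. A stripped-down sketch of that shape, with the metadata and gateway calls replaced by injected callbacks (all three step implementations here are hypothetical stand-ins for metadataValidateBucketAndObj, metadata.putObjectMD and multipleBackendGateway.objectTagging):

    const async = require('async');

    // validate/persist/propagate are stand-ins for the real modules.
    function deleteTagging({ validate, persist, propagate }, done) {
        async.waterfall([
            next => validate((err, bucket, objectMD) => next(err, bucket, objectMD)),
            (bucket, objectMD, next) => {
                objectMD.tags = {};                 // clear the tag set
                persist(bucket, objectMD, err => next(err, bucket, objectMD));
            },
            (bucket, objectMD, next) =>
                propagate(bucket, objectMD, err => next(err, bucket, objectMD)),
        ], done);
    }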
@@ -1,5 +1,5 @@
const { errors, s3middleware } = require('arsenal');
-const { parseRange } = require('arsenal/lib/network/http/utils');
+const { parseRange } = require('arsenal').network.http.utils;

const data = require('../data/wrapper');
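This hunk (and a later one with the same change) pulls parseRange from the packaged arsenal entry point rather than a deep lib/ path. The handler below only relies on it returning `{ range, error }`, where `range` is an inclusive `[start, end]` pair bounded by the object length. A rough standalone re-implementation of just that contract, for illustration only; the real helper is `require('arsenal').network.http.utils.parseRange`:

    // Rough sketch of the { range, error } contract used by objectGet below.
    function parseRangeSketch(rangeHeader, objLength) {
        const m = /^bytes=(\d*)-(\d*)$/.exec(rangeHeader || '');
        if (!m || (m[1] === '' && m[2] === '')) {
            return { error: new Error('InvalidRange') };
        }
        let start = m[1] === '' ? objLength - Number(m[2]) : Number(m[1]);
        let end = (m[1] === '' || m[2] === '') ? objLength - 1 : Number(m[2]);
        if (start > end || start >= objLength) {
            // unsatisfiable ranges are reported as errors, not clamped
            return { error: new Error('InvalidRange') };
        }
        end = Math.min(end, objLength - 1);
        return { range: [Math.max(start, 0), end] };
    }

    // Content-Length is end - start + 1 because both bounds are inclusive.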
@@ -49,187 +49,187 @@ function objectGet(authInfo, request, returnTagCount, log, callback) {
    };

    return metadataValidateBucketAndObj(mdValParams, log,
        (err, bucket, objMD) => {
            const corsHeaders = collectCorsHeaders(request.headers.origin,
                request.method, bucket);
            if (err) {
                log.debug('error processing request', {
                    error: err,
                    method: 'metadataValidateBucketAndObj',
                });
                return callback(err, null, corsHeaders);
            }
            if (!objMD) {
                const err = versionId ? errors.NoSuchVersion : errors.NoSuchKey;
                return callback(err, null, corsHeaders);
            }
            const verCfg = bucket.getVersioningConfiguration();
            if (objMD.isDeleteMarker) {
                const responseMetaHeaders = Object.assign({},
                    { 'x-amz-delete-marker': true }, corsHeaders);
                if (!versionId) {
                    return callback(errors.NoSuchKey, null, responseMetaHeaders);
                }
                // return MethodNotAllowed if requesting a specific
                // version that has a delete marker
                responseMetaHeaders['x-amz-version-id'] =
                    getVersionIdResHeader(verCfg, objMD);
                return callback(errors.MethodNotAllowed, null,
                    responseMetaHeaders);
            }
            const headerValResult = validateHeaders(request.headers,
                objMD['last-modified'], objMD['content-md5']);
            if (headerValResult.error) {
                return callback(headerValResult.error, null, corsHeaders);
            }
            const responseMetaHeaders = collectResponseHeaders(objMD,
                corsHeaders, verCfg, returnTagCount);

            setExpirationHeaders(responseMetaHeaders, {
                lifecycleConfig: bucket.getLifecycleConfiguration(),
                objectParams: {
                    key: objectKey,
                    tags: objMD.tags,
                    date: objMD['last-modified'],
                },
                isVersionedReq: !!versionId,
            });

            const objLength = (objMD.location === null ?
                0 : parseInt(objMD['content-length'], 10));
            let byteRange;
            const streamingParams = {};
            if (request.headers.range) {
                const { range, error } = parseRange(request.headers.range,
                    objLength);
                if (error) {
                    return callback(error, null, corsHeaders);
                }
                responseMetaHeaders['Accept-Ranges'] = 'bytes';
                if (range) {
                    byteRange = range;
                    // End of range should be included so + 1
                    responseMetaHeaders['Content-Length'] =
                        range[1] - range[0] + 1;
                    responseMetaHeaders['Content-Range'] =
                        `bytes ${range[0]}-${range[1]}/${objLength}`;
                    streamingParams.rangeStart = range[0] ?
                        range[0].toString() : undefined;
                    streamingParams.rangeEnd = range[1] ?
                        range[1].toString() : undefined;
                }
            }
            let dataLocator = null;
            if (objMD.location !== null) {
                // To provide for backwards compatibility before
                // md-model-version 2, need to handle cases where
                // objMD.location is just a string
                dataLocator = Array.isArray(objMD.location) ?
                    objMD.location : [{ key: objMD.location }];
                // if the data backend is azure, there will only ever be at
                // most one item in the dataLocator array
                if (dataLocator[0] && dataLocator[0].dataStoreType === 'azure') {
                    dataLocator[0].azureStreamingOptions = streamingParams;
                }

                let partNumber = null;
                if (request.query && request.query.partNumber !== undefined) {
                    if (byteRange) {
                        const error = errors.InvalidRequest
                            .customizeDescription('Cannot specify both Range ' +
                                'header and partNumber query parameter.');
                        return callback(error, null, corsHeaders);
                    }
                    partNumber = Number.parseInt(request.query.partNumber, 10);
                    if (Number.isNaN(partNumber)) {
                        const error = errors.InvalidArgument
                            .customizeDescription('Part number must be a number.');
                        return callback(error, null, corsHeaders);
                    }
                    if (partNumber < 1 || partNumber > 10000) {
                        const error = errors.InvalidArgument
                            .customizeDescription('Part number must be an ' +
                                'integer between 1 and 10000, inclusive.');
                        return callback(error, null, corsHeaders);
                    }
                }
                // If have a data model before version 2, cannot support
                // get range for objects with multiple parts
                if (byteRange && dataLocator.length > 1 &&
                    dataLocator[0].start === undefined) {
                    return callback(errors.NotImplemented, null, corsHeaders);
                }
                if (objMD['x-amz-server-side-encryption']) {
                    for (let i = 0; i < dataLocator.length; i++) {
                        dataLocator[i].masterKeyId =
                            objMD['x-amz-server-side-encryption-aws-kms-key-id'];
                        dataLocator[i].algorithm =
                            objMD['x-amz-server-side-encryption'];
                    }
                }
                if (partNumber) {
                    const locations = [];
                    let locationPartNumber;
                    for (let i = 0; i < objMD.location.length; i++) {
                        const { dataStoreETag } = objMD.location[i];

                        if (dataStoreETag) {
                            locationPartNumber =
                                Number.parseInt(dataStoreETag.split(':')[0], 10);
                        } else {
                            /**
                             * Location objects prior to GA7.1 do not include the
                             * dataStoreETag field so we cannot find the part range,
                             * the objects are treated as if they only have 1 part
                             */
                            locationPartNumber = 1;
                        }

                        // Get all parts that belong to the requested part number
                        if (partNumber === locationPartNumber) {
                            locations.push(objMD.location[i]);
                        } else if (locationPartNumber > partNumber) {
                            break;
                        }
                    }
                    if (locations.length === 0) {
                        return callback(errors.InvalidPartNumber, null,
                            corsHeaders);
                    }
                    const { start } = locations[0];
                    const endLocation = locations[locations.length - 1];
                    const end = endLocation.start + endLocation.size - 1;
                    responseMetaHeaders['Content-Length'] = end - start + 1;
                    const partByteRange = [start, end];
                    dataLocator = setPartRanges(dataLocator, partByteRange);
                    const partsCount = getPartCountFromMd5(objMD);
                    if (partsCount) {
                        responseMetaHeaders['x-amz-mp-parts-count'] =
                            partsCount;
                    }
                } else {
                    dataLocator = setPartRanges(dataLocator, byteRange);
                }
            }
            return data.head(dataLocator, log, err => {
                if (err) {
                    log.error('error from external backend checking for ' +
                        'object existence', { error: err });
                    return callback(err);
                }
                pushMetric('getObject', log, {
                    authInfo,
                    bucket: bucketName,
                    keys: [objectKey],
                    newByteLength:
                        Number.parseInt(responseMetaHeaders['Content-Length'], 10),
                    versionId: objMD.versionId,
                    location: objMD.dataStoreName,
                });
                return callback(null, dataLocator, responseMetaHeaders,
                    byteRange);
            });
        });
}

module.exports = objectGet;
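The partNumber branch above walks objMD.location, derives each stored part's number from the leading segment of dataStoreETag (defaulting to 1 for pre-GA7.1 locations), and turns the matching locations into one inclusive byte range. The same selection as a small standalone function over plain location records:

    // locations: [{ start, size, dataStoreETag: 'partNumber:etag' }, ...]
    // Returns the inclusive [start, end] byte range of the requested part,
    // or null when the part does not exist (InvalidPartNumber above).
    function partByteRange(locations, partNumber) {
        const matching = [];
        for (let i = 0; i < locations.length; i++) {
            const { dataStoreETag } = locations[i];
            // pre-GA7.1 locations carry no dataStoreETag: treat as a single part
            const locPart = dataStoreETag
                ? Number.parseInt(dataStoreETag.split(':')[0], 10) : 1;
            if (locPart === partNumber) {
                matching.push(locations[i]);
            } else if (locPart > partNumber) {
                break;                          // locations are ordered by part
            }
        }
        if (matching.length === 0) {
            return null;
        }
        const { start } = matching[0];
        const last = matching[matching.length - 1];
        return [start, last.start + last.size - 1];
    }

    // Content-Length for the part is end - start + 1, as set above.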
@@ -47,7 +47,7 @@ function objectGetLegalHold(authInfo, request, log, callback) {
            (err, bucket, objectMD) => {
                if (err) {
                    log.trace('request authorization failed',
                        { method: 'objectGetLegalHold', error: err });
                    return next(err);
                }
                if (!objectMD) {
@@ -44,31 +44,31 @@ function objectGetTagging(authInfo, request, log, callback) {

    return async.waterfall([
        next => metadataValidateBucketAndObj(metadataValParams, log,
            (err, bucket, objectMD) => {
                if (err) {
                    log.trace('request authorization failed',
                        { method: 'objectGetTagging', error: err });
                    return next(err);
                }
                if (!objectMD) {
                    const err = reqVersionId ? errors.NoSuchVersion :
                        errors.NoSuchKey;
                    log.trace('error no object metadata found',
                        { method: 'objectGetTagging', error: err });
                    return next(err, bucket);
                }
                if (objectMD.isDeleteMarker) {
                    if (reqVersionId) {
                        log.trace('requested version is delete marker',
                            { method: 'objectGetTagging' });
                        return next(errors.MethodNotAllowed);
                    }
                    log.trace('most recent version is delete marker',
                        { method: 'objectGetTagging' });
                    return next(errors.NoSuchKey);
                }
                return next(null, bucket, objectMD);
            }),
        (bucket, objectMD, next) => {
            const tags = objectMD.tags;
            const xml = convertToXml(tags);
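As in the GET object path, GetObjectTagging distinguishes two delete-marker cases: asking for a specific version that is a delete marker is MethodNotAllowed, while landing on a delete marker as the latest version is reported as NoSuchKey. A one-function restatement of those guards (plain string error names instead of arsenal errors):

    // Mirrors the objectGetTagging guards above.
    function taggingReadError(objectMD, reqVersionId) {
        if (!objectMD) {
            return reqVersionId ? 'NoSuchVersion' : 'NoSuchKey';
        }
        if (objectMD.isDeleteMarker) {
            // explicit version -> not allowed; latest version -> looks absent
            return reqVersionId ? 'MethodNotAllowed' : 'NoSuchKey';
        }
        return null;        // readable: proceed with objectMD.tags
    }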
@@ -1,6 +1,6 @@
const { errors, s3middleware } = require('arsenal');
const validateHeaders = s3middleware.validateConditionalHeaders;
-const { parseRange } = require('arsenal/lib/network/http/utils');
+const { parseRange } = require('arsenal').network.http.utils;

const { decodeVersionId } = require('./apiUtils/object/versioning');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@@ -70,115 +70,115 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
    log.trace('owner canonicalID to send to data', { canonicalID });

    return metadataValidateBucketAndObj(valParams, log,
        (err, bucket, objMD) => {
            const responseHeaders = collectCorsHeaders(headers.origin,
                method, bucket);
            if (err) {
                log.trace('error processing request', {
                    error: err,
                    method: 'metadataValidateBucketAndObj',
                });
                return callback(err, responseHeaders);
            }
            if (bucket.hasDeletedFlag() && canonicalID !== bucket.getOwner()) {
                log.trace('deleted flag on bucket and request ' +
                    'from non-owner account');
                return callback(errors.NoSuchBucket);
            }

            return async.waterfall([
                function handleTransientOrDeleteBuckets(next) {
                    if (bucket.hasTransientFlag() || bucket.hasDeletedFlag()) {
                        return cleanUpBucket(bucket, canonicalID, log, next);
                    }
                    return next();
                },
                function getSSEConfig(next) {
                    return getObjectSSEConfiguration(headers, bucket, log,
                        (err, sseConfig) => {
                            if (err) {
                                log.error('error getting server side encryption config', { err });
                                return next(invalidSSEError);
                            }
                            return next(null, sseConfig);
                        },
                    );
                },
                function createCipherBundle(serverSideEncryptionConfig, next) {
                    if (serverSideEncryptionConfig) {
                        return kms.createCipherBundle(
                            serverSideEncryptionConfig, log, next);
                    }
                    return next(null, null);
                },
                function objectCreateAndStore(cipherBundle, next) {
                    const objectLockValidationError
                        = validateHeaders(bucket, headers, log);
                    if (objectLockValidationError) {
                        return next(objectLockValidationError);
                    }
                    writeContinue(request, request._response);
                    return createAndStoreObject(bucketName,
                        bucket, objectKey, objMD, authInfo, canonicalID, cipherBundle,
                        request, false, streamingV4Params, log, next);
                },
            ], (err, storingResult) => {
                if (err) {
                    return callback(err, responseHeaders);
                }
                const newByteLength = parsedContentLength;

                setExpirationHeaders(responseHeaders, {
                    lifecycleConfig: bucket.getLifecycleConfiguration(),
                    objectParams: {
                        key: objectKey,
                        date: storingResult.lastModified,
                        tags: storingResult.tags,
                    },
                });

                // Utapi expects null or a number for oldByteLength:
                // * null - new object
                // * 0 or > 0 - existing object with content-length 0 or > 0
                // objMD here is the master version that we would
                // have overwritten if there was an existing version or object
                //
                // TODO: Handle utapi metrics for null version overwrites.
                const oldByteLength = objMD && objMD['content-length']
                    !== undefined ? objMD['content-length'] : null;
                if (storingResult) {
                    // ETag's hex should always be enclosed in quotes
                    responseHeaders.ETag = `"${storingResult.contentMD5}"`;
                }
                const vcfg = bucket.getVersioningConfiguration();
                const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
                if (isVersionedObj) {
                    if (storingResult && storingResult.versionId) {
                        responseHeaders['x-amz-version-id'] =
                            versionIdUtils.encode(storingResult.versionId,
                                config.versionIdEncodingType);
                    }
                }

                // Only pre-existing non-versioned objects get 0 all others use 1
                const numberOfObjects = !isVersionedObj && oldByteLength !== null ? 0 : 1;

                // only the bucket owner's metrics should be updated, regardless of
                // who the requester is
                pushMetric('putObject', log, {
                    authInfo,
                    canonicalID: bucket.getOwner(),
                    bucket: bucketName,
                    keys: [objectKey],
                    newByteLength,
                    oldByteLength: isVersionedObj ? null : oldByteLength,
                    versionId: isVersionedObj && storingResult ? storingResult.versionId : undefined,
                    location: bucket.getLocationConstraint(),
                    numberOfObjects,
                });
                return callback(null, responseHeaders);
            });
        });
}

module.exports = objectPut;
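The final waterfall callback above feeds utapi with oldByteLength (null for a brand-new key, the previous content-length otherwise) and counts a new object only when it is not replacing a pre-existing non-versioned one. A compact restatement of just that accounting:

    // objMD is the master version the PUT overwrote (or undefined);
    // vcfg is the bucket versioning configuration (or undefined).
    function putObjectAccounting(objMD, vcfg) {
        const oldByteLength = objMD && objMD['content-length'] !== undefined
            ? objMD['content-length'] : null;
        const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
        // Only pre-existing non-versioned objects get 0, all others use 1
        const numberOfObjects = !isVersionedObj && oldByteLength !== null ? 0 : 1;
        return {
            oldByteLength: isVersionedObj ? null : oldByteLength,
            numberOfObjects,
        };
    }

    console.log(putObjectAccounting({ 'content-length': 42 }, undefined));
    // -> { oldByteLength: 42, numberOfObjects: 0 }  (overwrite of a non-versioned object)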
@@ -99,7 +99,7 @@ function objectPutACL(authInfo, request, log, cb) {
        aclUtils.parseGrant(request.headers['x-amz-grant-read'], 'READ');
    const grantReadACPHeader =
        aclUtils.parseGrant(request.headers['x-amz-grant-read-acp'],
            'READ_ACP');
    const grantWriteACPHeader = aclUtils.parseGrant(
        request.headers['x-amz-grant-write-acp'], 'WRITE_ACP');
    const grantFullControlHeader = aclUtils.parseGrant(

@@ -119,7 +119,7 @@ function objectPutACL(authInfo, request, log, cb) {
            }
            if (objectMD.isDeleteMarker) {
                log.trace('delete marker detected',
                    { method: 'objectPutACL' });
                return next(errors.MethodNotAllowed, bucket);
            }
            return next(null, bucket, objectMD);

@@ -202,7 +202,7 @@ function objectPutACL(authInfo, request, log, cb) {
                if (!skip && granteeType === 'Group') {
                    if (possibleGroups.indexOf(grantee.URI[0]) < 0) {
                        log.trace('invalid user group',
                            { userGroup: grantee.URI[0] });
                        return next(errors.InvalidArgument, bucket);
                    }
                    return usersIdentifiedByGroup.push({

@@ -218,20 +218,20 @@ function objectPutACL(authInfo, request, log, cb) {
                // through the access headers
                const allGrantHeaders =
                    [].concat(grantReadHeader,
                        grantReadACPHeader, grantWriteACPHeader,
                        grantFullControlHeader);

                usersIdentifiedByEmail = allGrantHeaders.filter(item =>
                    item && item.userIDType.toLowerCase() === 'emailaddress');
                usersIdentifiedByGroup = allGrantHeaders
                    .filter(itm => itm && itm.userIDType
                        .toLowerCase() === 'uri');
                for (let i = 0; i < usersIdentifiedByGroup.length; i++) {
                    if (possibleGroups.indexOf(
                        usersIdentifiedByGroup[i].identifier) < 0) {
                        log.trace('invalid user group',
                            { userGroup: usersIdentifiedByGroup[i]
                                .identifier });
                        return next(errors.InvalidArgument, bucket);
                    }
                }
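The ACL hunks above collect every x-amz-grant-* header into one list, split it by grantee type, and reject group URIs outside the known set. A self-contained sketch of that filtering step, assuming (as the filters above do) that parseGrant yields `{ identifier, userIDType }` records:

    // allGrantHeaders: flat list of { identifier, userIDType } entries,
    // one per grantee named in an x-amz-grant-* header.
    function splitGrants(allGrantHeaders, possibleGroups) {
        const byEmail = allGrantHeaders.filter(item =>
            item && item.userIDType.toLowerCase() === 'emailaddress');
        const byGroup = allGrantHeaders.filter(item =>
            item && item.userIDType.toLowerCase() === 'uri');
        for (const grant of byGroup) {
            if (possibleGroups.indexOf(grant.identifier) < 0) {
                // unknown group URI -> InvalidArgument in the handler above
                return { error: 'InvalidArgument', userGroup: grant.identifier };
            }
        }
        return { byEmail, byGroup };
    }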
@@ -131,10 +131,10 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            }
            if (sourceObjMD.isDeleteMarker) {
                log.debug('delete marker on source object',
                    { sourceObject });
                if (reqVersionId) {
                    const err = errors.InvalidRequest
                        .customizeDescription('The source of a copy ' +
                        'request may not specifically refer to a delete' +
                        'marker by version id.');
                    return next(err, destBucketMD);

@@ -146,8 +146,8 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            }
            const headerValResult =
                validateHeaders(request.headers,
                    sourceObjMD['last-modified'],
                    sourceObjMD['content-md5']);
            if (headerValResult.error) {
                return next(errors.PreconditionFailed, destBucketMD);
            }

@@ -182,7 +182,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            sourceLocationConstraintName, next) {
            return metadata.getBucket(mpuBucketName, log,
                (err, mpuBucket) => {
-                   if (err && err.NoSuchBucket) {
+                   if (err?.is.NoSuchBucket) {
                        return next(errors.NoSuchUpload);
                    }
                    if (err) {

@@ -209,51 +209,51 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            const mpuOverviewKey =
                `overview${splitter}${destObjectKey}${splitter}${uploadId}`;
            return metadata.getObjectMD(mpuBucketName, mpuOverviewKey,
                null, log, (err, res) => {
                    if (err) {
-                       if (err.NoSuchKey) {
+                       if (err.is.NoSuchKey) {
                            return next(errors.NoSuchUpload);
                        }
                        log.error('error getting overview object from ' +
                            'mpu bucket', {
                                error: err,
                                method: 'objectPutCopyPart::' +
                                    'metadata.getObjectMD',
                            });
                        return next(err);
                    }
                    const initiatorID = res.initiator.ID;
                    const requesterID = authInfo.isRequesterAnIAMUser() ?
                        authInfo.getArn() : authInfo.getCanonicalID();
                    if (initiatorID !== requesterID) {
                        return next(errors.AccessDenied);
                    }
                    const destObjLocationConstraint =
                        res.controllingLocationConstraint;
                    return next(null, dataLocator, destBucketMD,
                        destObjLocationConstraint, copyObjectSize,
                        sourceVerId, sourceLocationConstraintName, splitter);
                });
        },
        function goGetData(dataLocator, destBucketMD,
            destObjLocationConstraint, copyObjectSize, sourceVerId,
            sourceLocationConstraintName, splitter, next) {
            data.uploadPartCopy(request, log, destBucketMD,
                sourceLocationConstraintName,
                destObjLocationConstraint, dataLocator, dataStoreContext,
                (error, eTag, lastModified, serverSideEncryption, locations) => {
                    if (error) {
                        if (error.message === 'skip') {
                            return next(skipError, destBucketMD, eTag,
                                lastModified, sourceVerId,
                                serverSideEncryption, lastModified, splitter);
                        }
                        return next(error, destBucketMD);
                    }
                    return next(null, destBucketMD, locations, eTag,
                        copyObjectSize, sourceVerId, serverSideEncryption,
                        lastModified, splitter);
                });
        },
        function getExistingPartInfo(destBucketMD, locations, totalHash,
            copyObjectSize, sourceVerId, serverSideEncryption, lastModified,

@@ -263,9 +263,9 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            metadata.getObjectMD(mpuBucketName, partKey, {}, log,
                (err, result) => {
                    // If there is nothing being overwritten just move on
-                   if (err && !err.NoSuchKey) {
+                   if (err && !err.is.NoSuchKey) {
                        log.debug('error getting current part (if any)',
                            { error: err });
                        return next(err);
                    }
                    let oldLocations;

@@ -299,7 +299,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
                locations, metaStoreParams, log, err => {
                    if (err) {
                        log.debug('error storing new metadata',
                            { error: err, method: 'storeNewPartMetadata' });
                        return next(err);
                    }
                    return next(null, locations, oldLocations, destBucketMD, totalHash,

@@ -377,7 +377,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
            request.method, destBucketMD);
        if (err && err !== skipError) {
            log.trace('error from copy part waterfall',
                { error: err });
            return callback(err, null, corsHeaders);
        }
        const xml = [
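The most visible non-whitespace change in the copy-part and put-part hunks is the error-check style: bare truthy property tests such as `err && err.NoSuchBucket` become `err?.is.NoSuchBucket`, and `err.NoSuchKey` becomes `err.is.NoSuchKey`, which assumes the newer arsenal errors expose an `is` map of per-code booleans. A small before/after sketch against a mocked error object:

    // Mocked shape of an arsenal-style error exposing the `is` map; only the
    // fields used by the checks above are modelled here.
    const err = { code: 404, message: 'NoSuchBucket', is: { NoSuchBucket: true } };

    // before: relies on a truthy NoSuchBucket property directly on the error
    // if (err && err.NoSuchBucket) { ... }

    // after: optional chaining plus the per-code boolean map
    if (err?.is.NoSuchBucket) {
        // map a missing MPU shadow bucket to NoSuchUpload, as above
        console.log('translate to NoSuchUpload');
    }

    // negative form used when an overwrite lookup may legitimately miss:
    // if (err && !err.is.NoSuchKey) { /* a real failure, propagate it */ }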
@@ -47,33 +47,33 @@ function objectPutLegalHold(authInfo, request, log, callback) {

    return async.waterfall([
        next => metadataValidateBucketAndObj(metadataValParams, log,
            (err, bucket, objectMD) => {
                if (err) {
                    log.trace('request authorization failed',
                        { method: 'objectPutLegalHold', error: err });
                    return next(err);
                }
                if (!objectMD) {
                    const err = versionId ? errors.NoSuchVersion :
                        errors.NoSuchKey;
                    log.trace('error no object metadata found',
                        { method: 'objectPutLegalHold', error: err });
                    return next(err, bucket);
                }
                if (objectMD.isDeleteMarker) {
                    log.trace('version is a delete marker',
                        { method: 'objectPutLegalHold' });
                    return next(errors.MethodNotAllowed, bucket);
                }
                if (!bucket.isObjectLockEnabled()) {
                    log.trace('object lock not enabled on bucket',
                        { method: 'objectPutLegalHold' });
                    return next(errors.InvalidRequest.customizeDescription(
-                       'Bucket is missing Object Lock Configuration'
+                       'Bucket is missing Object Lock Configuration',
                    ), bucket);
                }
                return next(null, bucket, objectMD);
            }),
        (bucket, objectMD, next) => {
            log.trace('parsing legal hold');
            parseLegalHoldXml(request.post, log, (err, res) =>
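PutObjectLegalHold refuses to operate on buckets without object lock, returning InvalidRequest with a custom description (the hunk only adds a trailing comma after that description string). A minimal guard sketch with the arsenal error swapped for a plain object:

    // bucket is assumed to expose isObjectLockEnabled(); the arsenal error is
    // replaced by a plain object here for illustration.
    function checkLegalHoldAllowed(bucket) {
        if (!bucket.isObjectLockEnabled()) {
            return {
                code: 'InvalidRequest',
                description: 'Bucket is missing Object Lock Configuration',
            };
        }
        return null;        // allowed: go on to parse the legal hold XML
    }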
@@ -94,7 +94,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
// Get the destination bucket.
next => metadata.getBucket(bucketName, log,
(err, destinationBucket) => {
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
return next(errors.NoSuchBucket, destinationBucket);
}
if (err) {

@@ -128,8 +128,8 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
if (err) {
log.error('error processing the cipher bundle for ' +
'the destination bucket', {
error: err,
});
return next(err, destinationBucket);
}
return next(null, destinationBucket, res);

@@ -141,24 +141,24 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
// Get the MPU shadow bucket.
(destinationBucket, cipherBundle, next) =>
metadata.getBucket(mpuBucketName, log,
(err, mpuBucket) => {
-if (err && err.NoSuchBucket) {
+if (err?.is.NoSuchBucket) {
return next(errors.NoSuchUpload, destinationBucket);
}
if (err) {
log.error('error getting the shadow mpu bucket', {
error: err,
method: 'objectPutPart::metadata.getBucket',
});
return next(err, destinationBucket);
}
let splitter = constants.splitter;
// BACKWARD: Remove to remove the old splitter
if (mpuBucket.getMdBucketModelVersion() < 2) {
splitter = constants.oldSplitter;
}
return next(null, destinationBucket, cipherBundle, splitter);
}),
// Check authorization of the MPU shadow bucket.
(destinationBucket, cipherBundle, splitter, next) => {
const mpuOverviewKey = _getOverviewKey(splitter, objectKey,

@@ -189,7 +189,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
// If data backend is backend that handles mpu (like real AWS),
// no need to store part info in metadata
(destinationBucket, objectLocationConstraint, cipherBundle,
splitter, next) => {
if (config.backends.data === 'multiple') {
// if mpu was initiated in legacy version
if (objectLocationConstraint === undefined) {

@@ -204,45 +204,45 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
objectLocationConstraint = backendInfoObj.controllingLC;
}
if (!multipleBackendGateway.isClientHandleMpu(
objectLocationConstraint)) {
// if data backend doesn't handle MPU, continue waterfall
return next(null, destinationBucket,
objectLocationConstraint, cipherBundle, splitter, null);
}
writeContinue(request, request._response);
return multipleBackendGateway.uploadPart(request,
streamingV4Params, null, size, objectLocationConstraint,
objectKey, uploadId, partNumber, bucketName, log,
(err, partInfo) => {
if (err) {
log.error('error putting part to data backend', {
error: err,
method:
'objectPutPart::multipleBackendGateway.uploadPart',
});
return next(err, destinationBucket);
} else if (partInfo &&
partInfo.dataStoreType === 'aws_s3') {
// if data backend handles MPU, skip to end of waterfall
return next(skipError, destinationBucket,
partInfo.dataStoreETag);
} else if (partInfo && partInfo.dataStoreType === 'azure') {
return next(null, destinationBucket,
objectLocationConstraint, cipherBundle, splitter,
partInfo);
}
let msg = 'backend is managing MPU but was';
msg += ' not handle after uploadPart';
log.error(msg, {
error: errors.InternalError,
method:
'objectPutPart::multipleBackendGateway.uploadPart',
});
return next(errors.InternalError, destinationBucket);
});
}
return next(null, destinationBucket, objectLocationConstraint,
cipherBundle, splitter, null);
},
// Get any pre-existing part.
(destinationBucket, objectLocationConstraint, cipherBundle,

@@ -252,7 +252,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
return metadata.getObjectMD(mpuBucketName, partKey, {}, log,
(err, res) => {
// If there is no object with the same key, continue.
-if (err && !err.NoSuchKey) {
+if (err && !err.is.NoSuchKey) {
log.error('error getting current part (if any)', {
error: err,
method: 'objectPutPart::metadata.getObjectMD',

@@ -278,14 +278,14 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
},
// Store in data backend.
(destinationBucket, objectLocationConstraint, cipherBundle,
partKey, prevObjectSize, oldLocations, partInfo, splitter, next) => {
// NOTE: set oldLocations to null so we do not batchDelete for now
if (partInfo && partInfo.dataStoreType === 'azure') {
// skip to storing metadata
return next(null, destinationBucket, partInfo,
partInfo.dataStoreETag,
cipherBundle, partKey, prevObjectSize, null,
objectLocationConstraint, splitter);
}
const objectContext = {
bucketName,

@@ -311,7 +311,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
// Store data locations in metadata and delete any overwritten
// data if completeMPU hasn't been initiated yet.
(destinationBucket, dataGetInfo, hexDigest, cipherBundle, partKey,
prevObjectSize, oldLocations, objectLocationConstraint, splitter, next) => {
// Use an array to be consistent with objectPutCopyPart where there
// could be multiple locations.
const partLocations = [dataGetInfo];

@@ -346,7 +346,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
});
},
(partLocations, oldLocations, objectLocationConstraint, destinationBucket,
hexDigest, prevObjectSize, splitter, next) => {
if (!oldLocations) {
return next(null, oldLocations, objectLocationConstraint,
destinationBucket, hexDigest, prevObjectSize);
@@ -50,33 +50,33 @@ function objectPutRetention(authInfo, request, log, callback) {

return async.waterfall([
next => metadataValidateBucketAndObj(metadataValParams, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',
{ method: 'objectPutRetention', error: err });
return next(err);
}
if (!objectMD) {
const err = reqVersionId ? errors.NoSuchVersion :
errors.NoSuchKey;
log.trace('error no object metadata found',
{ method: 'objectPutRetention', error: err });
return next(err, bucket);
}
if (objectMD.isDeleteMarker) {
log.trace('version is a delete marker',
{ method: 'objectPutRetention' });
return next(errors.MethodNotAllowed, bucket);
}
if (!bucket.isObjectLockEnabled()) {
log.trace('object lock not enabled on bucket',
{ method: 'objectPutRetention' });
return next(errors.InvalidRequest.customizeDescription(
-'Bucket is missing Object Lock Configuration'
+'Bucket is missing Object Lock Configuration',
), bucket);
}
return next(null, bucket, objectMD);
}),
(bucket, objectMD, next) => {
log.trace('parsing retention information');
parseRetentionXml(request.post, log,
@@ -49,30 +49,30 @@ function objectPutTagging(authInfo, request, log, callback) {

return async.waterfall([
next => metadataValidateBucketAndObj(metadataValParams, log,
(err, bucket, objectMD) => {
if (err) {
log.trace('request authorization failed',
{ method: 'objectPutTagging', error: err });
return next(err);
}
if (!objectMD) {
const err = reqVersionId ? errors.NoSuchVersion :
errors.NoSuchKey;
log.trace('error no object metadata found',
{ method: 'objectPutTagging', error: err });
return next(err, bucket);
}
if (objectMD.isDeleteMarker) {
log.trace('version is a delete marker',
{ method: 'objectPutTagging' });
return next(errors.MethodNotAllowed, bucket);
}
return next(null, bucket, objectMD);
}),
(bucket, objectMD, next) => {
log.trace('parsing tag(s)');
parseTagXml(request.post, log, (err, tags) =>
next(err, bucket, tags, objectMD));
},
(bucket, tags, objectMD, next) => {
// eslint-disable-next-line no-param-reassign

@@ -87,13 +87,13 @@ function objectPutTagging(authInfo, request, log, callback) {
objectMD.replicationInfo, replicationInfo);
}
metadata.putObjectMD(bucket.getName(), objectKey, objectMD, params,
log, err =>
next(err, bucket, objectMD));
},
(bucket, objectMD, next) => {
if (config.backends.data === 'multiple') {
return multipleBackendGateway.objectTagging('Put', objectKey,
bucket, objectMD, log, err => next(err, bucket, objectMD));
}
return next(null, bucket, objectMD);
},
@@ -35,7 +35,7 @@ function generateXml(xml, owner, userBuckets, splitter) {
`<Name>${key}</Name>`,
`<CreationDate>${bucket.value.creationDate}` +
'</CreationDate>',
-'</Bucket>'
+'</Bucket>',
);
});
xml.push('</Buckets></ListAllMyBucketsResult>');

@@ -68,7 +68,7 @@ function serviceGet(authInfo, request, log, callback) {
`<DisplayName>${authInfo.getAccountDisplayName()}` +
'</DisplayName>',
'</Owner>',
-'<Buckets>'
+'<Buckets>',
);
return services.getService(authInfo, request, log, constants.splitter,
(err, userBuckets, splitter) => {
@@ -100,7 +100,7 @@ function websiteGet(request, log, callback) {
const websiteConfig = bucket.getWebsiteConfiguration();
if (!websiteConfig) {
return callback(errors.NoSuchWebsiteConfiguration, false, null,
corsHeaders);
}
// any errors above would be our own created generic error html
// if have a website config, error going forward would be user's

@@ -141,19 +141,19 @@ function websiteGet(request, log, callback) {
// not want to append index key to redirect location
if (err) {
log.trace('error retrieving object metadata',
{ error: err });
let returnErr = err;
const bucketAuthorized = isBucketAuthorized(bucket,
'bucketGet', constants.publicId, null, log, request);
// if index object does not exist and bucket is private AWS
// returns 403 - AccessDenied error.
-if (err === errors.NoSuchKey && !bucketAuthorized) {
+if (err.is.NoSuchKey && !bucketAuthorized) {
returnErr = errors.AccessDenied;
}
return _errorActions(returnErr,
websiteConfig.getErrorDocument(), routingRules,
bucket, reqObjectKey, corsHeaders, log,
callback);
}
if (!isObjAuthorized(bucket, objMD, 'objectGet',
constants.publicId, null, log, request)) {
@@ -101,13 +101,13 @@ function websiteHead(request, log, callback) {
// not want to append index key to redirect location
if (err) {
log.trace('error retrieving object metadata',
{ error: err });
let returnErr = err;
const bucketAuthorized = isBucketAuthorized(bucket,
'bucketGet', constants.publicId, null, log, request);
// if index object does not exist and bucket is private AWS
// returns 403 - AccessDenied error.
-if (err === errors.NoSuchKey && !bucketAuthorized) {
+if (err.is.NoSuchKey && !bucketAuthorized) {
returnErr = errors.AccessDenied;
}
return _errorActions(returnErr, routingRules,
@@ -283,7 +283,7 @@ class V4Transform extends Transform {
}
// get next chunk
return callback();
-}
+},
);
}
}
@@ -9,7 +9,7 @@ const { config } = require('../../Config');

const missingVerIdInternalError = errors.InternalError.customizeDescription(
'Invalid state. Please ensure versioning is enabled ' +
-'in AWS for the location constraint and try again.'
+'in AWS for the location constraint and try again.',
);

class AwsClient {

@@ -32,23 +32,23 @@ class AwsClient {
}
put(stream, size, keyContext, reqUids, callback) {
const awsKey = this._createAwsKey(keyContext.bucketName,
keyContext.objectKey, this._bucketMatch);
const metaHeaders = trimXMetaPrefix(keyContext.metaHeaders);
const log = createLogger(reqUids);

const putCb = (err, data) => {
if (err) {
logHelper(log, 'error', 'err from data backend',
err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
if (!data.VersionId) {
logHelper(log, 'error', 'missing version id for data ' +
'backend object', missingVerIdInternalError,
this._dataStoreName);
return callback(missingVerIdInternalError);
}
const dataStoreVersionId = data.VersionId;

@@ -105,15 +105,15 @@ class AwsClient {
'from datastore', err, this._dataStoreName);
if (err.code === 'NotFound') {
const error = errors.ServiceUnavailable
.customizeDescription(
'Unexpected error from AWS: "NotFound". Data on AWS ' +
-'may have been altered outside of CloudServer.'
+'may have been altered outside of CloudServer.',
);
return callback(error);
}
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
return callback();

@@ -129,11 +129,11 @@ class AwsClient {
Range: range ? `bytes=${range[0]}-${range[1]}` : null,
}).on('success', response => {
log.trace('AWS GET request response headers',
{ responseHeaders: response.httpResponse.headers });
});
const stream = request.createReadStream().on('error', err => {
logHelper(log, 'error', 'error streaming data from AWS',
err, this._dataStoreName);
return callback(err);
});
return callback(null, stream);

@@ -159,8 +159,8 @@ class AwsClient {
return callback();
}
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
return callback();
@@ -170,33 +170,33 @@ class AwsClient {
healthcheck(location, callback) {
const awsResp = {};
this._client.headBucket({ Bucket: this._awsBucketName },
err => {
/* eslint-disable no-param-reassign */
if (err) {
awsResp[location] = { error: err, external: true };
return callback(null, awsResp);
}
return this._client.getBucketVersioning({
Bucket: this._awsBucketName },
(err, data) => {
if (err) {
awsResp[location] = { error: err, external: true };
} else if (!data.Status ||
data.Status === 'Suspended') {
awsResp[location] = {
versioningStatus: data.Status,
error: 'Versioning must be enabled',
external: true,
};
} else {
awsResp[location] = {
versioningStatus: data.Status,
message: 'Congrats! You own the bucket',
};
}
return callback(null, awsResp);
});
});
}

createMPU(key, metaHeaders, bucketName, websiteRedirectHeader, contentType,

@@ -228,10 +228,10 @@ class AwsClient {
return this._client.createMultipartUpload(params, (err, mpuResObj) => {
if (err) {
logHelper(log, 'error', 'err from data backend',
err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
return callback(null, mpuResObj);

@@ -239,7 +239,7 @@ class AwsClient {
}

uploadPart(request, streamingV4Params, stream, size, key, uploadId,
partNumber, bucketName, log, callback) {
let hashedStream = stream;
if (request) {
const partStream = prepareStream(request, streamingV4Params,

@@ -258,8 +258,8 @@ class AwsClient {
logHelper(log, 'error', 'err from data backend ' +
'on uploadPart', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
// Because we manually add quotes to ETag later, remove quotes here

@@ -276,7 +276,7 @@ class AwsClient {
}

listParts(key, uploadId, bucketName, partNumberMarker, maxParts, log,
callback) {
const awsBucket = this._awsBucketName;
const awsKey = this._createAwsKey(bucketName, key, this._bucketMatch);
const params = { Bucket: awsBucket, Key: awsKey, UploadId: uploadId,

@@ -284,10 +284,10 @@ class AwsClient {
return this._client.listParts(params, (err, partList) => {
if (err) {
logHelper(log, 'error', 'err from data backend on listPart',
err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
// build storedParts object to mimic Scality S3 backend returns
@@ -348,46 +348,46 @@ class AwsClient {
};
const completeObjData = { key: awsKey };
return this._client.completeMultipartUpload(mpuParams,
(err, completeMpuRes) => {
if (err) {
if (mpuError[err.code]) {
logHelper(log, 'trace', 'err from data backend on ' +
'completeMPU', err, this._dataStoreName);
return callback(errors[err.code]);
}
logHelper(log, 'error', 'err from data backend on ' +
'completeMPU', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
if (!completeMpuRes.VersionId) {
logHelper(log, 'error', 'missing version id for data ' +
'backend object', missingVerIdInternalError,
this._dataStoreName);
return callback(missingVerIdInternalError);
}
// need to get content length of new object to store
// in our metadata
return this._client.headObject({ Bucket: awsBucket, Key: awsKey },
(err, objHeaders) => {
if (err) {
logHelper(log, 'trace', 'err from data backend on ' +
'headObject', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
// remove quotes from eTag because they're added later
completeObjData.eTag = completeMpuRes.ETag
.substring(1, completeMpuRes.ETag.length - 1);
completeObjData.dataStoreVersionId = completeMpuRes.VersionId;
completeObjData.contentLength = objHeaders.ContentLength;
return callback(null, completeObjData);
});
});
}

abortMPU(key, uploadId, bucketName, log, callback) {

@@ -402,8 +402,8 @@ class AwsClient {
'the MPU on AWS S3. You should abort directly on AWS S3 ' +
'using the same uploadId.', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
return callback();

@@ -430,8 +430,8 @@ class AwsClient {
logHelper(log, 'error', 'error from data backend on ' +
'putObjectTagging', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
return callback();

@@ -452,19 +452,19 @@ class AwsClient {
logHelper(log, 'error', 'error from data backend on ' +
'deleteObjectTagging', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
return callback();
});
}
copyObject(request, destLocationConstraintName, sourceKey,
sourceLocationConstraintName, storeMetadataParams, log, callback) {
const destBucketName = request.bucketName;
const destObjectKey = request.objectKey;
const destAwsKey = this._createAwsKey(destBucketName, destObjectKey,
this._bucketMatch);

const sourceAwsBucketName =
config.getAwsBucketName(sourceLocationConstraintName);

@@ -489,32 +489,32 @@ class AwsClient {
`${sourceAwsBucketName} AWS bucket`, err,
this._dataStoreName);
return callback(errors.AccessDenied
.customizeDescription('Error: Unable to access ' +
-`${sourceAwsBucketName} AWS bucket`)
+`${sourceAwsBucketName} AWS bucket`),
);
}
logHelper(log, 'error', 'error from data backend on ' +
'copyObject', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
if (!copyResult.VersionId) {
logHelper(log, 'error', 'missing version id for data ' +
'backend object', missingVerIdInternalError,
this._dataStoreName);
return callback(missingVerIdInternalError);
}
return callback(null, destAwsKey, copyResult.VersionId);
});
}
uploadPartCopy(request, awsSourceKey, sourceLocationConstraintName,
log, callback) {
const destBucketName = request.bucketName;
const destObjectKey = request.objectKey;
const destAwsKey = this._createAwsKey(destBucketName, destObjectKey,
this._bucketMatch);

const sourceAwsBucketName =
config.getAwsBucketName(sourceLocationConstraintName);

@@ -538,15 +538,15 @@ class AwsClient {
`${sourceAwsBucketName} AWS bucket`, err,
this._dataStoreName);
return callback(errors.AccessDenied
.customizeDescription('Error: Unable to access ' +
-`${sourceAwsBucketName} AWS bucket`)
+`${sourceAwsBucketName} AWS bucket`),
);
}
logHelper(log, 'error', 'error from data backend on ' +
'uploadPartCopy', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
const eTag = removeQuotes(res.CopyPartResult.ETag);
@@ -42,7 +42,7 @@ class AzureClient {
if (log) {
log.error('error thrown by Azure Storage Client Library',
{ error: err.message, stack: err.stack, s3Method,
azureMethod, dataStoreName: this._dataStoreName });
}
cb(error.customizeDescription('Error from Azure ' +
`method: ${azureMethod} on ${s3Method} S3 call: ` +

@@ -83,75 +83,75 @@ class AzureClient {
};

return metadata.listMultipartUploads(mpuBucketName, listingParams,
log, (err, mpuList) => {
if (err && !err.NoSuchBucket) {
log.error('Error listing MPUs for Azure delete',
{ error: err, dataStoreName });
return cb(errors.ServiceUnavailable);
}
if (mpuList && mpuList.Uploads && mpuList.Uploads.length > 0) {
const error = errors.MPUinProgress;
log.error('Error: cannot put/delete object to Azure with ' +
'same key name as ongoing MPU on Azure',
{ error, dataStoreName });
return cb(error);
}
// If listMultipartUploads returns a NoSuchBucket error or the
// mpu list is empty, there are no conflicting MPUs, so continue
return cb();
});
}

put(stream, size, keyContext, reqUids, callback) {
const log = createLogger(reqUids);
// before blob is put, make sure there is no ongoing MPU with same key
this.protectAzureBlocks(keyContext.bucketName,
keyContext.objectKey, this._dataStoreName, log, err => {
// if error returned, there is ongoing MPU, so do not put
if (err) {
return callback(err.customizeDescription(
`Error putting object to Azure: ${err.message}`));
}
const azureKey = this._createAzureKey(keyContext.bucketName,
keyContext.objectKey, this._bucketMatch);
const options = {
metadata: translateAzureMetaHeaders(keyContext.metaHeaders,
keyContext.tagging),
contentSettings: {
contentType: keyContext.contentType || undefined,
cacheControl: keyContext.cacheControl || undefined,
contentDisposition: keyContext.contentDisposition ||
undefined,
contentEncoding: keyContext.contentEncoding || undefined,
},
};
if (size === 0) {
return this._errorWrapper('put', 'createBlockBlobFromText',
[this._azureContainerName, azureKey, '', options,
err => {
if (err) {
logHelper(log, 'error', 'err from Azure PUT data ' +
'backend', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
`Azure: ${err.message}`));
}
return callback(null, azureKey);
}], log, callback);
}
return this._errorWrapper('put', 'createBlockBlobFromStream',
[this._azureContainerName, azureKey, stream, size, options,
err => {
if (err) {
logHelper(log, 'error', 'err from Azure PUT data ' +
'backend', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
`Azure: ${err.message}`));
}
return callback(null, azureKey);
}], log, callback);
});
}

head(objectGetInfo, reqUids, callback) {

@@ -159,24 +159,24 @@ class AzureClient {
const { key, azureStreamingOptions } = objectGetInfo;
return this._errorWrapper('head', 'getBlobProperties',
[this._azureContainerName, key, azureStreamingOptions,
err => {
if (err) {
logHelper(log, 'error', 'err from Azure HEAD data backend',
err, this._dataStoreName);
if (err.code === 'NotFound') {
const error = errors.ServiceUnavailable
.customizeDescription(
'Unexpected error from Azure: "NotFound". Data ' +
'on Azure may have been altered outside of ' +
'CloudServer.');
return callback(error);
}
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
`Azure: ${err.message}`));
}
return callback();
}], log, callback);
}

get(objectGetInfo, range, reqUids, callback) {

@@ -195,14 +195,14 @@ class AzureClient {
}
this._errorWrapper('get', 'getBlobToStream',
[this._azureContainerName, key, response, streamingOptions,
err => {
if (err) {
logHelper(log, 'error', 'err from Azure GET data backend',
err, this._dataStoreName);
return callback(errors.ServiceUnavailable);
}
return callback(null, response);
}], log, callback);
}

delete(objectGetInfo, reqUids, callback) {

@@ -212,17 +212,17 @@ class AzureClient {
objectGetInfo.key;
return this._errorWrapper('delete', 'deleteBlobIfExists',
[this._azureContainerName, key,
err => {
if (err) {
const log = createLogger(reqUids);
logHelper(log, 'error', 'error deleting object from ' +
'Azure datastore', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
`Azure: ${err.message}`));
}
return callback();
}], log, callback);
}

healthcheck(location, callback, flightCheckOnStartUp) {

@@ -246,7 +246,7 @@ class AzureClient {
}

uploadPart(request, streamingV4Params, partStream, size, key, uploadId,
partNumber, bucket, log, callback) {
const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
const params = { bucketName: this._azureContainerName,
partNumber, size, objectKey: azureKey, uploadId };
@@ -274,27 +274,27 @@ class AzureClient {
if (size <= azureMpuUtils.maxSubPartSize) {
const errorWrapperFn = this._errorWrapper.bind(this);
return azureMpuUtils.putSinglePart(errorWrapperFn,
stream, params, this._dataStoreName, log, (err, dataStoreETag) => {
if (err) {
return callback(err);
}
dataRetrievalInfo.dataStoreETag = dataStoreETag;
return callback(null, dataRetrievalInfo);
});
}
const errorWrapperFn = this._errorWrapper.bind(this);
return azureMpuUtils.putSubParts(errorWrapperFn, stream,
params, this._dataStoreName, log, (err, dataStoreETag) => {
if (err) {
return callback(err);
}
dataRetrievalInfo.dataStoreETag = dataStoreETag;
return callback(null, dataRetrievalInfo);
});
}

completeMPU(jsonList, mdInfo, key, uploadId, bucket, metaHeaders,
contentSettings, log, callback) {
const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
const commitList = {
UncommittedBlocks: jsonList.uncommittedBlocks || [],

@@ -319,20 +319,20 @@ class AzureClient {
};
this._errorWrapper('completeMPU', 'commitBlocks',
[this._azureContainerName, azureKey, commitList, options,
err => {
if (err) {
logHelper(log, 'error', 'err completing MPU on Azure ' +
'datastore', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
`Azure: ${err.message}`));
}
const completeObjData = {
key: azureKey,
filteredPartsObj,
};
return callback(null, completeObjData);
}], log, callback);
}

objectPutTagging(key, bucket, objectMD, log, callback) {

@@ -341,14 +341,14 @@ class AzureClient {
azureMD.tags = JSON.stringify(objectMD.tags);
this._errorWrapper('objectPutTagging', 'setBlobMetadata',
[this._azureContainerName, azureKey, azureMD,
err => {
if (err) {
logHelper(log, 'error', 'err putting object tags to ' +
'Azure backend', err, this._dataStoreName);
return callback(errors.ServiceUnavailable);
}
return callback();
}], log, callback);
}

objectDeleteTagging(key, bucket, objectMD, log, callback) {

@@ -356,27 +356,27 @@ class AzureClient {
const azureMD = this._getMetaHeaders(objectMD);
this._errorWrapper('objectDeleteTagging', 'setBlobMetadata',
[this._azureContainerName, azureKey, azureMD,
err => {
if (err) {
logHelper(log, 'error', 'err putting object tags to ' +
'Azure backend', err, this._dataStoreName);
return callback(errors.ServiceUnavailable);
}
return callback();
}], log, callback);
}

copyObject(request, destLocationConstraintName, sourceKey,
sourceLocationConstraintName, storeMetadataParams, log, callback) {
const destContainerName = request.bucketName;
const destObjectKey = request.objectKey;

const destAzureKey = this._createAzureKey(destContainerName,
destObjectKey, this._bucketMatch);

const sourceContainerName =
config.locationConstraints[sourceLocationConstraintName]
.details.azureContainerName;

let options;
if (storeMetadataParams.metaHeaders) {

@@ -387,7 +387,7 @@ class AzureClient {
this._errorWrapper('copyObject', 'startCopyBlob',
[`${this._azureStorageEndpoint}` +
`${sourceContainerName}/${sourceKey}`,
this._azureContainerName, destAzureKey, options,
(err, res) => {
if (err) {
if (err.code === 'CannotVerifyCopySource') {

@@ -395,36 +395,36 @@ class AzureClient {
`${sourceContainerName} Azure Container`, err,
this._dataStoreName);
return callback(errors.AccessDenied
.customizeDescription('Error: Unable to access ' +
-`${sourceContainerName} Azure Container`)
+`${sourceContainerName} Azure Container`),
);
}
logHelper(log, 'error', 'error from data backend on ' +
'copyObject', err, this._dataStoreName);
return callback(errors.ServiceUnavailable
.customizeDescription('Error returned from ' +
-`AWS: ${err.message}`)
+`AWS: ${err.message}`),
);
}
if (res.copy.status === 'pending') {
logHelper(log, 'error', 'Azure copy status is pending',
err, this._dataStoreName);
const copyId = res.copy.id;
this._client.abortCopyBlob(this._azureContainerName,
destAzureKey, copyId, err => {
|
||||||
if (err) {
|
if (err) {
|
||||||
logHelper(log, 'error', 'error from data backend ' +
|
logHelper(log, 'error', 'error from data backend ' +
|
||||||
'on abortCopyBlob', err, this._dataStoreName);
|
'on abortCopyBlob', err, this._dataStoreName);
|
||||||
return callback(errors.ServiceUnavailable
|
return callback(errors.ServiceUnavailable
|
||||||
.customizeDescription('Error returned from ' +
|
.customizeDescription('Error returned from ' +
|
||||||
`AWS on abortCopyBlob: ${err.message}`)
|
`AWS on abortCopyBlob: ${err.message}`),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return callback(errors.InvalidObjectState
|
||||||
|
.customizeDescription('Error: Azure copy status was ' +
|
||||||
|
'pending. It has been aborted successfully'),
|
||||||
);
|
);
|
||||||
}
|
});
|
||||||
return callback(errors.InvalidObjectState
|
|
||||||
.customizeDescription('Error: Azure copy status was ' +
|
|
||||||
'pending. It has been aborted successfully')
|
|
||||||
);
|
|
||||||
});
|
|
||||||
}
|
}
|
||||||
return callback(null, destAzureKey);
|
return callback(null, destAzureKey);
|
||||||
}], log, callback);
|
}], log, callback);
|
||||||
|
|
|
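The Azure handlers above all surface backend failures through arsenal's error objects, using customizeDescription to attach a backend-specific message to a standard S3 error. A minimal sketch of that pattern as used above (the wrapper function name is hypothetical):

const { errors } = require('arsenal');

// Wrap a raw Azure SDK error into the S3-style error returned to the client;
// customizeDescription() only changes the human-readable message, not the code.
function toS3Error(err) {
    return errors.ServiceUnavailable
        .customizeDescription(`Error returned from Azure: ${err.message}`);
}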
@ -14,7 +14,7 @@ const azureHealth = {
const utils = {
logHelper(log, level, description, error, dataStoreName) {
log[level](description, { error: error.message,
errorName: error.name, dataStoreName });
},
// take off the 'x-amz-meta-'
trimXMetaPrefix(obj) {

@ -70,7 +70,7 @@ const utils = {
* same account since Azure copy outside of an account is async
*/
externalBackendCopy(locationConstraintSrc, locationConstraintDest,
sourceBucketMD, destBucketMD) {
const sourceBucketName = sourceBucketMD.getName();
const destBucketName = destBucketMD.getName();
const isSameBucket = sourceBucketName === destBucketName;

@ -86,11 +86,11 @@ const utils = {
(sourceLocationConstraintType === 'aws_s3' ||
(sourceLocationConstraintType === 'azure' &&
config.isSameAzureAccount(locationConstraintSrc,
locationConstraintDest)));
},

checkExternalBackend(clients, locations, type, flightCheckOnStartUp,
externalBackendHealthCheckInterval, cb) {
const checkStatus = type === 'aws_s3' ? awsHealth : azureHealth;
if (locations.length === 0) {
return process.nextTick(cb, null, []);
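externalBackendCopy above decides whether a copy can be delegated to the external backend itself. A condensed sketch of the boolean it ends with (simplified: the full check also uses the source and destination bucket metadata shown above, and the requirement that both locations have the same type is an assumption based on the surrounding code):

// srcType/destType are location constraint types such as 'aws_s3' or 'azure'.
function canCopyOnExternalBackend(srcType, destType, srcLocation, destLocation, config) {
    return srcType === destType &&
        (srcType === 'aws_s3' ||
            (srcType === 'azure' &&
                config.isSameAzureAccount(srcLocation, destLocation)));
}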
@ -32,16 +32,16 @@ const backend = {
}
cursor += data.length;
})
.on('end', () => {
if (exceeded) {
log.error('data stream exceed announced size',
{ size, overflow: cursor });
callback(errors.InternalError);
} else {
ds[count] = { value, keyContext };
callback(null, count++);
}
});
},

get: function getMem(objectGetInfo, range, reqUids, callback) {
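The in-memory put above counts bytes as they arrive and rejects streams that overflow the announced size. A self-contained sketch of the same check (the helper name is hypothetical):

// Resolves the actual byte count, or an error if the stream exceeds `size`.
function verifyAnnouncedSize(stream, size, callback) {
    let cursor = 0;
    let exceeded = false;
    stream.on('data', chunk => {
        if (cursor + chunk.length > size) {
            exceeded = true;
        }
        cursor += chunk.length;
    });
    stream.on('end', () => {
        if (exceeded) {
            return callback(new Error('data stream exceeded announced size'));
        }
        return callback(null, cursor);
    });
}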
@ -19,14 +19,14 @@ config.on('location-constraints-update', () => {
const multipleBackendGateway = {

put: (hashedStream, size, keyContext,
backendInfo, reqUids, callback) => {
const controllingLocationConstraint =
backendInfo.getControllingLocationConstraint();
const client = clients[controllingLocationConstraint];
if (!client) {
const log = createLogger(reqUids);
log.error('no data backend matching controlling locationConstraint',
{ controllingLocationConstraint });
return process.nextTick(() => {
callback(errors.InternalError);
});

@ -55,7 +55,7 @@ const multipleBackendGateway = {
log.debug('put to location', { controllingLocationConstraint });
if (err) {
log.error('error from datastore',
{ error: err, dataStoreType: client.clientType });
return callback(errors.ServiceUnavailable);
}
const dataRetrievalInfo = {

@ -156,29 +156,29 @@ const multipleBackendGateway = {
}, () => {
async.parallel([
next => checkExternalBackend(
clients, awsArray, 'aws_s3', flightCheckOnStartUp,
externalBackendHealthCheckInterval, next),
next => checkExternalBackend(
clients, azureArray, 'azure', flightCheckOnStartUp,
externalBackendHealthCheckInterval, next),
], (errNull, externalResp) => {
const externalLocResults = [];
externalResp.forEach(resp => externalLocResults.push(...resp));
externalLocResults.forEach(locationResult =>
Object.assign(multBackendResp, locationResult));
callback(null, multBackendResp);
});
});
},

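The healthcheck above runs the AWS and Azure checks in parallel and folds every per-location result into one response object. A reduced sketch of that merge step, with generic names standing in for the arrays and response object used above:

const async = require('async');

// Each check yields an array of { locationName: status } objects; the merge
// flattens them into a single response keyed by location.
function mergeHealthResults(checks, callback) {
    async.parallel(checks, (err, perBackendResults) => {
        if (err) {
            return callback(err);
        }
        const merged = {};
        perBackendResults.forEach(results =>
            results.forEach(result => Object.assign(merged, result)));
        return callback(null, merged);
    });
}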
createMPU: (key, metaHeaders, bucketName, websiteRedirectHeader,
location, contentType, cacheControl, contentDisposition,
contentEncoding, tagging, log, cb) => {
const client = clients[location];
if (client.clientType === 'aws_s3') {
return client.createMPU(key, metaHeaders, bucketName,
websiteRedirectHeader, contentType, cacheControl,
contentDisposition, contentEncoding, tagging, log, cb);
}
return cb();
},

@ -192,18 +192,18 @@ const multipleBackendGateway = {
},

uploadPart: (request, streamingV4Params, stream, size, location, key,
uploadId, partNumber, bucketName, log, cb) => {
const client = clients[location];

if (client.uploadPart) {
return client.uploadPart(request, streamingV4Params, stream, size,
key, uploadId, partNumber, bucketName, log, cb);
}
return cb();
},

listParts: (key, uploadId, location, bucketName, partNumberMarker, maxParts,
log, cb) => {
const client = clients[location];

if (client.listParts) {

@ -214,7 +214,7 @@ const multipleBackendGateway = {
},

completeMPU: (key, uploadId, location, jsonList, mdInfo, bucketName,
userMetadata, contentSettings, log, cb) => {
const client = clients[location];
if (client.completeMPU) {
const args = [jsonList, mdInfo, key, uploadId, bucketName];

@ -262,42 +262,42 @@ const multipleBackendGateway = {
// NOTE: using copyObject only if copying object from one external
// backend to the same external backend
copyObject: (request, destLocationConstraintName, externalSourceKey,
sourceLocationConstraintName, storeMetadataParams, log, cb) => {
const client = clients[destLocationConstraintName];
if (client.copyObject) {
return client.copyObject(request, destLocationConstraintName,
externalSourceKey, sourceLocationConstraintName,
storeMetadataParams, log, (err, key, dataStoreVersionId) => {
const dataRetrievalInfo = {
key,
dataStoreName: destLocationConstraintName,
dataStoreType: client.clientType,
dataStoreVersionId,
};
cb(err, dataRetrievalInfo);
});
}
return cb(errors.NotImplemented
.customizeDescription('Can not copy object from ' +
`${client.clientType} to ${client.clientType}`));
},
uploadPartCopy: (request, location, awsSourceKey,
sourceLocationConstraintName, log, cb) => {
const client = clients[location];
if (client.uploadPartCopy) {
return client.uploadPartCopy(request, awsSourceKey,
sourceLocationConstraintName,
log, cb);
}
return cb(errors.NotImplemented.customizeDescription(
'Can not copy object from ' +
`${client.clientType} to ${client.clientType}`));
},
protectAzureBlocks: (bucketName, objectKey, location, log, cb) => {
const client = clients[location];
if (client.protectAzureBlocks) {
return client.protectAzureBlocks(bucketName, objectKey, location,
log, cb);
}
return cb();
},
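Every gateway method above follows the same dispatch shape: look up the client for the requested location, call the method if that backend implements it, and otherwise fall back to an empty or NotImplemented callback. A generic sketch of that shape (the helper name is hypothetical, not part of the codebase):

// clients maps location constraint names to backend client instances.
function dispatch(clients, location, methodName, args, cb) {
    const client = clients[location];
    if (client && typeof client[methodName] === 'function') {
        return client[methodName](...args, cb);
    }
    return cb();
}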
@ -1,26 +1,18 @@
const async = require('async');
const { errors, s3middleware } = require('arsenal');
const PassThrough = require('stream').PassThrough;

const DataFileInterface = require('./file/backend');
const inMemory = require('./in_memory/backend').backend;
const locationConstraintCheck =
require('../api/apiUtils/object/locationConstraintCheck');
const multipleBackendGateway = require('./multipleBackendGateway');
const utils = require('./external/utils');
const { storage } = require('arsenal');
const { config } = require('../Config');
const MD5Sum = s3middleware.MD5Sum;
const NullStream = s3middleware.NullStream;
const assert = require('assert');
const kms = require('../kms/wrapper');
const externalBackends = require('../../constants').externalBackends;
const constants = require('../../constants');
const { BackendInfo } = require('../api/apiUtils/object/BackendInfo');
const RelayMD5Sum = require('../utilities/RelayMD5Sum');
const skipError = new Error('skip');
const metadata = require('../metadata/wrapper');
const vault = require('../auth/vault');
const locationStorageCheck =
require('../api/apiUtils/object/locationStorageCheck');
const { DataWrapper, MultipleBackendGateway, parseLC } = storage.data;
const { DataFileInterface } = storage.data.file;
const inMemory = storage.data.inMemory.datastore.backend;

let CdmiData;
try {
// eslint-disable-next-line import/no-unresolved
CdmiData = require('cdmiclient').CdmiData;
} catch (err) {
CdmiData = null;

@ -33,10 +25,12 @@ if (config.backends.data === 'mem') {
client = inMemory;
implName = 'mem';
} else if (config.backends.data === 'file') {
client = new DataFileInterface();
client = new DataFileInterface(config);
implName = 'file';
} else if (config.backends.data === 'multiple') {
client = multipleBackendGateway;
const clients = parseLC(config, vault);
client = new MultipleBackendGateway(
clients, metadata, locationStorageCheck);
implName = 'multipleBackends';
} else if (config.backends.data === 'cdmi') {
if (!CdmiData) {
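With the imports above, the multiple-backend case builds its clients through arsenal rather than the local gateway module. A minimal sketch of that wiring, assuming the arsenal interfaces exactly as they are used in this diff (parseLC, MultipleBackendGateway, DataWrapper) and the require paths shown above:

const { storage } = require('arsenal');
const { config } = require('../Config');
const kms = require('../kms/wrapper');
const metadata = require('../metadata/wrapper');
const vault = require('../auth/vault');
const locationStorageCheck =
    require('../api/apiUtils/object/locationStorageCheck');
const { DataWrapper, MultipleBackendGateway, parseLC } = storage.data;

// parseLC() expands the location constraint configuration into one client per
// location; the gateway then routes each data call to the matching client.
const clients = parseLC(config, vault);
const client = new MultipleBackendGateway(clients, metadata, locationStorageCheck);
const data = new DataWrapper(
    client, 'multipleBackends', config, kms, metadata, locationStorageCheck, vault);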
@ -52,780 +46,16 @@ if (config.backends.data === 'mem') {
implName = 'cdmi';
}

/**
* _retryDelete - Attempt to delete key again if it failed previously
* @param { string | object } objectGetInfo - either string location of object
* to delete or object containing info of object to delete
* @param {object} log - Werelogs request logger
* @param {number} count - keeps count of number of times function has been run
* @param {function} cb - callback
* @returns undefined and calls callback
*/
const MAX_RETRY = 2;

// This check is done because on a put, complete mpu or copy request to
// Azure/AWS, if the object already exists on that backend, the existing object
// should not be deleted, which is the functionality for all other backends
function _shouldSkipDelete(locations, requestMethod, newObjDataStoreName) {
const skipMethods = { PUT: true, POST: true };
if (!Array.isArray(locations) || !locations[0] ||
!locations[0].dataStoreType) {
return false;
}
const isSkipBackend = externalBackends[locations[0].dataStoreType];
const isMatchingBackends =
locations[0].dataStoreName === newObjDataStoreName;
const isSkipMethod = skipMethods[requestMethod];
return (isSkipBackend && isMatchingBackends && isSkipMethod);
}

function _retryDelete(objectGetInfo, log, count, cb) {
if (count > MAX_RETRY) {
return cb(errors.InternalError);
}
return client.delete(objectGetInfo, log.getSerializedUids(), err => {
if (err) {
if (err.ObjNotFound) {
log.info('no such key in datastore',
{ objectGetInfo, implName, moreRetries: 'no' });
return cb(err);
}
log.error('delete error from datastore',
{ error: err, implName, moreRetries: 'yes' });
return _retryDelete(objectGetInfo, log, count + 1, cb);
}
return cb();
});
}
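_retryDelete above retries a failed delete up to MAX_RETRY times before giving up with InternalError. The same bounded-retry pattern, generalized (a hypothetical helper, not part of the codebase):

// op(cb) performs one attempt; retry() re-runs it until it succeeds or the
// attempt budget is exhausted, then surfaces the last error.
function retry(op, maxRetries, cb, attempt = 0) {
    op(err => {
        if (!err) {
            return cb();
        }
        if (attempt >= maxRetries) {
            return cb(err);
        }
        return retry(op, maxRetries, cb, attempt + 1);
    });
}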

function _put(cipherBundle, value, valueSize,
keyContext, backendInfo, log, cb) {
assert.strictEqual(typeof valueSize, 'number');
log.debug('sending put to datastore', { implName, keyContext,
method: 'put' });
let hashedStream = null;
if (value) {
hashedStream = new MD5Sum();
value.pipe(hashedStream);
value.once('clientError', () => {
log.trace('destroying hashed stream');
hashedStream.destroy();
});
}

if (implName === 'multipleBackends') {
// Need to send backendInfo to client.put and
// client.put will provide dataRetrievalInfo so no
// need to construct here
/* eslint-disable no-param-reassign */
keyContext.cipherBundle = cipherBundle;
return client.put(hashedStream,
valueSize, keyContext, backendInfo, log.getSerializedUids(),
(err, dataRetrievalInfo) => {
if (err) {
log.error('put error from datastore',
{ error: err, implName });
if (err.httpCode === 408) {
return cb(errors.IncompleteBody);
}
return cb(errors.ServiceUnavailable);
}
return cb(null, dataRetrievalInfo, hashedStream);
});
}
/* eslint-enable no-param-reassign */

let writeStream = hashedStream;
if (cipherBundle && cipherBundle.cipher) {
writeStream = cipherBundle.cipher;
hashedStream.pipe(writeStream);
}

return client.put(writeStream, valueSize, keyContext,
log.getSerializedUids(), (err, key) => {
if (err) {
log.error('put error from datastore',
{ error: err, implName });
if (err.httpCode === 408) {
return cb(errors.IncompleteBody);
}
return cb(errors.InternalError);
}
const dataRetrievalInfo = {
key,
dataStoreName: implName,
};
return cb(null, dataRetrievalInfo, hashedStream);
});
}

const data = {
put: (cipherBundle, value, valueSize, keyContext, backendInfo, log, cb) => {
_put(cipherBundle, value, valueSize, keyContext, backendInfo, log,
(err, dataRetrievalInfo, hashedStream) => {
if (err) {
return cb(err);
}
if (hashedStream) {
if (hashedStream.completedHash) {
return cb(null, dataRetrievalInfo, hashedStream);
}
hashedStream.on('hashed', () => {
hashedStream.removeAllListeners('hashed');
return cb(null, dataRetrievalInfo, hashedStream);
});
return undefined;
}
return cb(null, dataRetrievalInfo);
});
},

head: (objectGetInfo, log, cb) => {
if (implName !== 'multipleBackends') {
// no-op if not multipleBackend implementation;
// head is used during get just to check external backend data state
return process.nextTick(cb);
}
return client.head(objectGetInfo, log.getSerializedUids(), cb);
},

get: (objectGetInfo, response, log, cb) => {
const isMdModelVersion2 = typeof(objectGetInfo) === 'string';
const isRequiredStringKey = constants.clientsRequireStringKey[implName];
const key = isMdModelVersion2 ? objectGetInfo : objectGetInfo.key;
const clientGetInfo = isRequiredStringKey ? key : objectGetInfo;
const range = objectGetInfo.range;

// If the key is explicitly set to null, the part to
// be read doesn't really exist and is only made of zeroes.
// This functionality is used by Scality-NFSD.
// Otherwise, the key is always defined
assert(key === null || key !== undefined);
if (key === null) {
cb(null, new NullStream(objectGetInfo.size, range));
return;
}
log.debug('sending get to datastore', { implName,
key, range, method: 'get' });
// We need to use response as a writable stream for AZURE GET
if (!isMdModelVersion2 && !isRequiredStringKey && response) {
clientGetInfo.response = response;
}
client.get(clientGetInfo, range, log.getSerializedUids(),
(err, stream) => {
if (err) {
log.error('get error from datastore',
{ error: err, implName });
return cb(errors.ServiceUnavailable);
}
if (objectGetInfo.cipheredDataKey) {
const serverSideEncryption = {
cryptoScheme: objectGetInfo.cryptoScheme,
masterKeyId: objectGetInfo.masterKeyId,
cipheredDataKey: Buffer.from(
objectGetInfo.cipheredDataKey, 'base64'),
};
const offset = objectGetInfo.range ?
objectGetInfo.range[0] : 0;
return kms.createDecipherBundle(
serverSideEncryption, offset, log,
(err, decipherBundle) => {
if (err) {
log.error('cannot get decipher bundle ' +
'from kms', {
method: 'data.wrapper.data.get',
});
return cb(err);
}
stream.pipe(decipherBundle.decipher);
return cb(null, decipherBundle.decipher);
});
}
return cb(null, stream);
});
},

delete: (objectGetInfo, log, cb) => {
const callback = cb || log.end;
const isMdModelVersion2 = typeof(objectGetInfo) === 'string';
const isRequiredStringKey = constants.clientsRequireStringKey[implName];
const key = isMdModelVersion2 ? objectGetInfo : objectGetInfo.key;
const clientGetInfo = isRequiredStringKey ? key : objectGetInfo;

log.trace('sending delete to datastore', {
implName, key, method: 'delete' });
// If the key is explicitly set to null, the part to
// be deleted doesn't really exist.
// This functionality is used by Scality-NFSD.
// Otherwise, the key is always defined
assert(key === null || key !== undefined);
if (key === null) {
callback(null);
return;
}
_retryDelete(clientGetInfo, log, 0, err => {
if (err && !err.ObjNotFound) {
log.error('delete error from datastore',
{ error: err, key: objectGetInfo.key, moreRetries: 'no' });
}
return callback(err);
});
},

batchDelete: (locations, requestMethod, newObjDataStoreName, log, cb) => {
// TODO: The method of persistence of sproxy delete key will
// be finalized; refer Issue #312 for the discussion. In the
// meantime, we at least log the location of the data we are
// about to delete before attempting its deletion.
if (_shouldSkipDelete(locations, requestMethod, newObjDataStoreName)) {
return process.nextTick(cb);
}
log.trace('initiating batch delete', {
keys: locations,
implName,
method: 'batchDelete',
});
const keys = [];
let backendName = '';
const shouldBatchDelete = locations.every(l => {
// legacy sproxyd location, should fallback to using regular delete
if (typeof l === 'string') {
return false;
}
const { dataStoreName, key } = l;
backendName = dataStoreName;
const type = config.getLocationConstraintType(dataStoreName);
// filter out possible `null` created by NFS
if (key && type === 'scality') {
keys.push(key);
return true;
}
return false;
});
if (shouldBatchDelete) {
return client.batchDelete(backendName, { keys }, log, cb);
}
return async.eachLimit(locations, 5, (loc, next) => {
process.nextTick(() => data.delete(loc, log, next));
},
err => {
if (err) {
log.end().error('batch delete failed', { error: err });
// deletion of non-existing objects result in 204
if (err.code === 404) {
return cb();
}
return cb(err);
}
log.end().trace('batch delete successfully completed');
return cb();
});
},

switch: newClient => {
client = newClient;
return client;
},

checkHealth: (log, cb, flightCheckOnStartUp) => {
if (!client.healthcheck) {
const defResp = {};
defResp[implName] = { code: 200, message: 'OK' };
return cb(null, defResp);
}
return client.healthcheck(flightCheckOnStartUp, log, (err, result) => {
let respBody = {};
if (err) {
log.error(`error from ${implName}`, { error: err });
respBody[implName] = {
error: err,
};
// error returned as null so async parallel doesn't return
// before all backends are checked
return cb(null, respBody);
}
if (implName === 'multipleBackends') {
respBody = result;
return cb(null, respBody);
}
respBody[implName] = {
code: result.statusCode,
message: result.statusMessage,
};
return cb(null, respBody);
});
},

getDiskUsage: (log, cb) => {
if (!client.getDiskUsage) {
log.debug('returning empty disk usage as fallback', { implName });
return cb(null, {});
}
return client.getDiskUsage(log.getSerializedUids(), cb);
},

/**
* _putForCopy - put used for copying object
* @param {object} cipherBundle - cipher bundle that encrypt the data
* @param {object} stream - stream containing the data
* @param {object} part - element of dataLocator array
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @param {object} log - Werelogs request logger
* @param {function} cb - callback
* @returns {function} cb - callback
*/
_putForCopy: (cipherBundle, stream, part, dataStoreContext,
destBackendInfo, log, cb) => data.put(cipherBundle, stream,
part.size, dataStoreContext,
destBackendInfo, log,
(error, partRetrievalInfo) => {
if (error) {
return cb(error);
}
const partResult = {
key: partRetrievalInfo.key,
dataStoreName: partRetrievalInfo
.dataStoreName,
dataStoreType: partRetrievalInfo
.dataStoreType,
start: part.start,
size: part.size,
};
if (cipherBundle) {
partResult.cryptoScheme = cipherBundle.cryptoScheme;
partResult.cipheredDataKey = cipherBundle.cipheredDataKey;
}
if (part.dataStoreETag) {
partResult.dataStoreETag = part.dataStoreETag;
}
if (partRetrievalInfo.dataStoreVersionId) {
partResult.dataStoreVersionId =
partRetrievalInfo.dataStoreVersionId;
}
return cb(null, partResult);
}),

/**
* _dataCopyPut - put used for copying object with and without
* encryption
* @param {string} serverSideEncryption - Server side encryption
* @param {object} stream - stream containing the data
* @param {object} part - element of dataLocator array
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @param {object} log - Werelogs request logger
* @param {function} cb - callback
* @returns {function} cb - callback
*/
_dataCopyPut: (serverSideEncryption, stream, part, dataStoreContext,
destBackendInfo, log, cb) => {
if (serverSideEncryption) {
return kms.createCipherBundle(
serverSideEncryption,
log, (err, cipherBundle) => {
if (err) {
log.debug('error getting cipherBundle');
return cb(errors.InternalError);
}
return data._putForCopy(cipherBundle, stream, part,
dataStoreContext, destBackendInfo, log, cb);
});
}
// Copied object is not encrypted so just put it
// without a cipherBundle
return data._putForCopy(null, stream, part, dataStoreContext,
destBackendInfo, log, cb);
},

/**
* copyObject - copy object
* @param {object} request - request object
* @param {string} sourceLocationConstraintName -
* source locationContraint name (awsbackend, azurebackend, ...)
* @param {object} storeMetadataParams - metadata information of the
* source object
* @param {array} dataLocator - source object metadata location(s)
* NOTE: for Azure and AWS data backend this array only has one item
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* @param {BackendInfo} destBackendInfo - Instance of BackendInfo:
* Represents the info necessary to evaluate which data backend to use
* on a data put call.
* @param {object} sourceBucketMD - metadata of the source bucket
* @param {object} destBucketMD - metadata of the destination bucket
* @param {object} serverSideEncryption - server side encryption configuration
* @param {object} log - Werelogs request logger
* @param {function} cb - callback
* @returns {function} cb - callback
*/
copyObject: (request,
sourceLocationConstraintName, storeMetadataParams, dataLocator,
dataStoreContext, destBackendInfo, sourceBucketMD, destBucketMD,
serverSideEncryption, log, cb) => {
if (config.backends.data === 'multiple' &&
utils.externalBackendCopy(sourceLocationConstraintName,
storeMetadataParams.dataStoreName, sourceBucketMD, destBucketMD)
&& serverSideEncryption === null) {
const destLocationConstraintName =
storeMetadataParams.dataStoreName;
const objectGetInfo = dataLocator[0];
const externalSourceKey = objectGetInfo.key;
return client.copyObject(request, destLocationConstraintName,
externalSourceKey, sourceLocationConstraintName,
storeMetadataParams, log, (error, objectRetrievalInfo) => {
if (error) {
return cb(error);
}
const putResult = {
key: objectRetrievalInfo.key,
dataStoreName: objectRetrievalInfo.
dataStoreName,
dataStoreType: objectRetrievalInfo.
dataStoreType,
dataStoreVersionId:
objectRetrievalInfo.dataStoreVersionId,
size: storeMetadataParams.size,
dataStoreETag: objectGetInfo.dataStoreETag,
start: objectGetInfo.start,
};
const putResultArr = [putResult];
return cb(null, putResultArr);
});
}

// dataLocator is an array. need to get and put all parts
// For now, copy 1 part at a time. Could increase the second
// argument here to increase the number of parts
// copied at once.
return async.mapLimit(dataLocator, 1,
// eslint-disable-next-line prefer-arrow-callback
function copyPart(part, copyCb) {
if (part.dataStoreType === 'azure') {
const passThrough = new PassThrough();
return async.parallel([
parallelCb => data.get(part, passThrough, log, err =>
parallelCb(err)),
parallelCb => data._dataCopyPut(serverSideEncryption,
passThrough,
part, dataStoreContext, destBackendInfo, log,
parallelCb),
], (err, res) => {
if (err) {
return copyCb(err);
}
return copyCb(null, res[1]);
});
}
return data.get(part, null, log, (err, stream) => {
if (err) {
return copyCb(err);
}
return data._dataCopyPut(serverSideEncryption, stream,
part, dataStoreContext, destBackendInfo, log, copyCb);
});
}, (err, results) => {
if (err) {
log.debug('error transferring data from source',
{ error: err });
return cb(err);
}
return cb(null, results);
});
},

_dataCopyPutPart: (request,
serverSideEncryption, stream, part,
dataStoreContext, destBackendInfo, locations, log, cb) => {
const numberPartSize =
Number.parseInt(part.size, 10);
const partNumber = Number.parseInt(request.query.partNumber, 10);
const uploadId = request.query.uploadId;
const destObjectKey = request.objectKey;
const destBucketName = request.bucketName;
const destLocationConstraintName = destBackendInfo
.getControllingLocationConstraint();
if (externalBackends[config
.locationConstraints[destLocationConstraintName]
.type]) {
return multipleBackendGateway.uploadPart(null, null,
stream, numberPartSize,
destLocationConstraintName, destObjectKey, uploadId,
partNumber, destBucketName, log,
(err, partInfo) => {
if (err) {
log.error('error putting ' +
'part to AWS', {
error: err,
method:
'objectPutCopyPart::' +
'multipleBackendGateway.' +
'uploadPart',
});
return cb(errors.ServiceUnavailable);
}
// skip to end of waterfall
// because don't need to store
// part metadata
if (partInfo &&
partInfo.dataStoreType === 'aws_s3') {
// if data backend handles MPU, skip to end
// of waterfall
const partResult = {
dataStoreETag: partInfo.dataStoreETag,
};
locations.push(partResult);
return cb(skipError, partInfo.dataStoreETag);
} else if (
partInfo &&
partInfo.dataStoreType === 'azure') {
const partResult = {
key: partInfo.key,
dataStoreName: partInfo.dataStoreName,
dataStoreETag: partInfo.dataStoreETag,
size: numberPartSize,
numberSubParts:
partInfo.numberSubParts,
partNumber: partInfo.partNumber,
};
locations.push(partResult);
return cb();
}
return cb(skipError);
});
}
if (serverSideEncryption) {
return kms.createCipherBundle(
serverSideEncryption,
log, (err, cipherBundle) => {
if (err) {
log.debug('error getting cipherBundle',
{ error: err });
return cb(errors.InternalError);
}
return data.put(cipherBundle, stream,
numberPartSize, dataStoreContext,
destBackendInfo, log,
(error, partRetrievalInfo,
hashedStream) => {
if (error) {
log.debug('error putting ' +
'encrypted part', { error });
return cb(error);
}
const partResult = {
key: partRetrievalInfo.key,
dataStoreName: partRetrievalInfo
.dataStoreName,
dataStoreETag: hashedStream
.completedHash,
// Do not include part start
// here since will change in
// final MPU object
size: numberPartSize,
sseCryptoScheme: cipherBundle
.cryptoScheme,
sseCipheredDataKey: cipherBundle
.cipheredDataKey,
sseAlgorithm: cipherBundle
.algorithm,
sseMasterKeyId: cipherBundle
.masterKeyId,
};
locations.push(partResult);
return cb();
});
});
}
// Copied object is not encrypted so just put it
// without a cipherBundle
return data.put(null, stream, numberPartSize,
dataStoreContext, destBackendInfo,
log, (error, partRetrievalInfo, hashedStream) => {
if (error) {
log.debug('error putting object part',
{ error });
return cb(error);
}
const partResult = {
key: partRetrievalInfo.key,
dataStoreName: partRetrievalInfo.dataStoreName,
dataStoreETag: hashedStream.completedHash,
size: numberPartSize,
};
locations.push(partResult);
return cb();
});
},

/**
* uploadPartCopy - put copy part
* @param {object} request - request object
* @param {object} log - Werelogs request logger
* @param {object} destBucketMD - destination bucket metadata
* @param {string} sourceLocationConstraintName -
* source locationContraint name (awsbackend, azurebackend, ...)
* @param {string} destLocationConstraintName -
* location of the destination MPU object (awsbackend, azurebackend, ...)
* @param {array} dataLocator - source object metadata location(s)
* NOTE: for Azure and AWS data backend this array
* @param {object} dataStoreContext - information of the
* destination object
* dataStoreContext.bucketName: destination bucket name,
* dataStoreContext.owner: owner,
* dataStoreContext.namespace: request namespace,
* dataStoreContext.objectKey: destination object key name,
* dataStoreContext.uploadId: uploadId
* dataStoreContext.partNumber: request.query.partNumber
* @param {function} callback - callback
* @returns {function} cb - callback
*/
uploadPartCopy: (request, log, destBucketMD, sourceLocationConstraintName,
destLocationConstraintName, dataLocator, dataStoreContext,
callback) => {
const serverSideEncryption = destBucketMD.getServerSideEncryption();
const lastModified = new Date().toJSON();

// skip if 0 byte object
if (dataLocator.length === 0) {
return process.nextTick(() => {
callback(null, constants.emptyFileMd5,
lastModified, serverSideEncryption, []);
});
}

// if destination mpu was initiated in legacy version
if (destLocationConstraintName === undefined) {
const backendInfoObj = locationConstraintCheck(request,
null, destBucketMD, log);
if (backendInfoObj.err) {
return process.nextTick(() => {
callback(backendInfoObj.err);
});
}
// eslint-disable-next-line no-param-reassign
destLocationConstraintName = backendInfoObj.controllingLC;
}

const locationTypeMatchAWS =
config.backends.data === 'multiple' &&
config.getLocationConstraintType(sourceLocationConstraintName) ===
config.getLocationConstraintType(destLocationConstraintName) &&
config.getLocationConstraintType(sourceLocationConstraintName) ===
'aws_s3';

// NOTE: using multipleBackendGateway.uploadPartCopy only if copying
// from AWS to AWS

if (locationTypeMatchAWS && dataLocator.length === 1) {
const awsSourceKey = dataLocator[0].key;
return multipleBackendGateway.uploadPartCopy(request,
destLocationConstraintName, awsSourceKey,
sourceLocationConstraintName, log, (error, eTag) => {
if (error) {
return callback(error);
}
return callback(skipError, eTag,
lastModified, serverSideEncryption);
});
}

const backendInfo = new BackendInfo(destLocationConstraintName);

// totalHash will be sent through the RelayMD5Sum transform streams
// to collect the md5 from multiple streams
let totalHash;
const locations = [];
// dataLocator is an array. need to get and put all parts
// in order so can get the ETag of full object
return async.forEachOfSeries(dataLocator,
// eslint-disable-next-line prefer-arrow-callback
function copyPart(part, index, cb) {
if (part.dataStoreType === 'azure') {
const passThrough = new PassThrough();
return async.parallel([
next => data.get(part, passThrough, log, err => {
if (err) {
log.error('error getting data part ' +
'from Azure', {
error: err,
method:
'objectPutCopyPart::' +
'multipleBackendGateway.' +
'copyPart',
});
return next(err);
}
return next();
}),
next => data._dataCopyPutPart(request,
serverSideEncryption, passThrough, part,
dataStoreContext, backendInfo, locations, log, next),
], err => {
if (err) {
return cb(err);
}
return cb();
});
}
return data.get(part, null, log, (err, stream) => {
if (err) {
log.debug('error getting object part',
{ error: err });
return cb(err);
}
const hashedStream =
new RelayMD5Sum(totalHash, updatedHash => {
totalHash = updatedHash;
});
stream.pipe(hashedStream);

// destLocationConstraintName is location of the
// destination MPU object
return data._dataCopyPutPart(request,
serverSideEncryption, hashedStream, part,
dataStoreContext, backendInfo, locations, log, cb);
});
}, err => {
// Digest the final combination of all of the part streams
if (err && err !== skipError) {
log.debug('error transferring data from source',
{ error: err, method: 'goGetData' });
return callback(err);
}
if (totalHash) {
totalHash = totalHash.digest('hex');
} else {
totalHash = locations[0].dataStoreETag;
}
if (err && err === skipError) {
return callback(skipError, totalHash,
lastModified, serverSideEncryption);
}
return callback(null, totalHash,
lastModified, serverSideEncryption, locations);
});
},
};

module.exports = data;

const data = new DataWrapper(
client, implName, config, kms, metadata, locationStorageCheck, vault);

config.on('location-constraints-update', () => {
if (implName === 'multipleBackends') {
const clients = parseLC(config, vault);
client = new MultipleBackendGateway(
clients, metadata, locationStorageCheck);
data.switch(client);
}
});

module.exports = { data, client, implName };
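The removed implementation above ends with `module.exports = data;`, while the replacement exports an object. A hedged sketch of how a caller consumes the new shape (the require path is illustrative and depends on the caller's location, and it assumes the arsenal DataWrapper keeps the same callback-style surface as the removed implementation):

// Before: const data = require('../data/wrapper');
// After: the module exports an object, so callers destructure what they need.
const { data, client, implName } = require('../data/wrapper');

// `data` is the arsenal DataWrapper instance (put/get/delete/batchDelete/...),
// `client` is the low-level backend client, `implName` names the backend mode;
// existing call sites only need the changed require line.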
@ -57,7 +57,7 @@ class Common {
return newIV;
}

/**
* Derive key to use in cipher
* @param {number} cryptoScheme - cryptoScheme being used
* @param {buffer} dataKey - the unencrypted key (either from the

@ -84,7 +84,7 @@ class Common {
this._keySize(), 'sha1', (err, derivedKey) => {
if (err) {
log.error('pbkdf2 function failed on key derivation',
{ error: err });
cb(errors.InternalError);
return;
}

@ -107,7 +107,7 @@ class Common {
return cb(errors.InternalError);
}

/**
* createDecipher
* @param {number} cryptoScheme - cryptoScheme being used
* @param {buffer} dataKey - the unencrypted key (either from the

@ -131,7 +131,7 @@ class Common {
const toSkip = offset % aesBlockSize;
const iv = this._incrementIV(derivedIV, blocks);
const cipher = crypto.createDecipheriv(this._algorithm(),
derivedKey, iv);
if (toSkip) {
/* Above, we advanced to the latest boundary not
greater than the offset amount. Here we advance by
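The key derivation hunk above reports failures from crypto.pbkdf2 with SHA-1 as the digest. A minimal standalone sketch of that derivation step (the iteration count and salt handling here are placeholders, not the module's actual values):

const crypto = require('crypto');

// Derive a cipher key of `keySize` bytes from the unencrypted data key.
function deriveKey(dataKey, salt, keySize, log, cb) {
    crypto.pbkdf2(dataKey, salt, 10000, keySize, 'sha1', (err, derivedKey) => {
        if (err) {
            log.error('pbkdf2 function failed on key derivation', { error: err });
            return cb(err);
        }
        return cb(null, derivedKey);
    });
}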
@ -40,7 +40,7 @@ const backend = {
});
},

/**
*
* @param {number} cryptoScheme - crypto scheme version number
* @param {string} masterKeyId - master key; for the file backend

@ -53,10 +53,10 @@ const backend = {
* @callback called with (err, cipheredDataKey: Buffer)
*/
cipherDataKey: function cipherDataKeyMem(cryptoScheme,
masterKeyId,
plainTextDataKey,
log,
cb) {
process.nextTick(() => {
const masterKey = Buffer.from(masterKeyId, 'hex');
Common.createCipher(

@ -81,7 +81,7 @@ const backend = {
});
},

/**
*
* @param {number} cryptoScheme - crypto scheme version number
* @param {string} masterKeyId - master key; for the file backend

@ -94,10 +94,10 @@ const backend = {
* @callback called with (err, plainTextDataKey: Buffer)
*/
decipherDataKey: function decipherDataKeyMem(cryptoScheme,
masterKeyId,
cipheredDataKey,
log,
cb) {
process.nextTick(() => {
const masterKey = Buffer.from(masterKeyId, 'hex');
Common.createDecipher(
@@ -8,7 +8,7 @@ const backend = {
* Target implementation will be async. let's mimic it
*/

/**
*
* @param {string} bucketName - bucket name
* @param {object} log - logger object

@@ -40,7 +40,7 @@ const backend = {
});
},

/**
*
* @param {number} cryptoScheme - crypto scheme version number
* @param {string} masterKeyId - key to retrieve master key

@@ -51,10 +51,10 @@ const backend = {
* @callback called with (err, cipheredDataKey: Buffer)
*/
cipherDataKey: function cipherDataKeyMem(cryptoScheme,
masterKeyId,
plainTextDataKey,
log,
cb) {
process.nextTick(() => {
Common.createCipher(
cryptoScheme, kms[masterKeyId], 0, log,

@@ -78,7 +78,7 @@ const backend = {
});
},

/**
*
* @param {number} cryptoScheme - crypto scheme version number
* @param {string} masterKeyId - key to retrieve master key

@@ -89,10 +89,10 @@ const backend = {
* @callback called with (err, plainTextDataKey: Buffer)
*/
decipherDataKey: function decipherDataKeyMem(cryptoScheme,
masterKeyId,
cipheredDataKey,
log,
cb) {
process.nextTick(() => {
Common.createDecipher(
cryptoScheme, kms[masterKeyId], 0, log,
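Both backends above expose the same callback interface; the hunks show cipherDataKey(cryptoScheme, masterKeyId, plainTextDataKey, log, cb) and decipherDataKey(cryptoScheme, masterKeyId, cipheredDataKey, log, cb). A minimal usage sketch follows, assuming a backend object wired as in these files; the helper name and the 256-bit key size are illustrative, not taken from the source.

    // Sketch only: round-trip a data key through one of the KMS backends shown above.
    const crypto = require('crypto');

    function roundTripDataKey(backend, masterKeyId, log, cb) {
        // hypothetical 256-bit plaintext data key
        const plainTextDataKey = crypto.randomBytes(32);
        return backend.cipherDataKey(1, masterKeyId, plainTextDataKey, log,
            (err, cipheredDataKey) => {
                if (err) {
                    return cb(err);
                }
                // decipher it again with the same backend and master key
                return backend.decipherDataKey(1, masterKeyId, cipheredDataKey,
                    log, cb);
            });
    }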
@@ -6,12 +6,12 @@ const https = require('https');
const logger = require('../utilities/logger');

function _createEncryptedBucket(host,
port,
bucketName,
accessKey,
secretKey,
verbose, ssl,
locationConstraint) {
const options = {
host,
port,

@@ -82,11 +82,11 @@ function createEncryptedBucket() {
.option('-s, --ssl', 'Enable ssl')
.option('-v, --verbose')
.option('-l, --location-constraint <locationConstraint>',
'location Constraint')
.parse(process.argv);

const { host, port, accessKey, secretKey, bucket, verbose, ssl,
locationConstraint } = commander;

if (!host || !port || !accessKey || !secretKey || !bucket) {
logger.error('missing parameter');
@@ -11,14 +11,13 @@ const Common = require('./common');
let scalityKMS;
let scalityKMSImpl;
try {
- // eslint-disable-next-line import/no-unresolved
const ScalityKMS = require('scality-kms');
scalityKMS = new ScalityKMS(config.kms);
scalityKMSImpl = 'scalityKms';
} catch (error) {
logger.warn('scality kms unavailable. ' +
'Using file kms backend unless mem specified.',
{ error });
scalityKMS = file;
scalityKMSImpl = 'fileKms';
}

@@ -47,7 +46,7 @@ if (config.backends.kms === 'mem') {
}

class KMS {
/**
*
* @param {string} bucketName - bucket name
* @param {object} log - logger object

@@ -67,7 +66,7 @@ class KMS {
});
}

/**
*
* @param {string} bucketName - bucket name
* @param {object} sseConfig - SSE configuration

@@ -105,7 +104,7 @@ class KMS {
return cb(null, serverSideEncryptionInfo);
});
}
/*
* no encryption
*/
return cb(null, null);

@@ -144,7 +143,7 @@ class KMS {
}


/**
* createCipherBundle
* @param {object} serverSideEncryptionInfo - info for encryption
* @param {number} serverSideEncryptionInfo.cryptoScheme -

@@ -161,7 +160,7 @@ class KMS {
* @callback called with (err, cipherBundle)
*/
static createCipherBundle(serverSideEncryptionInfo,
log, cb) {
const dataKey = this.createDataKey(log);

const { algorithm, configuredMasterKeyId, masterKeyId: bucketMasterKeyId } = serverSideEncryptionInfo;

@@ -204,7 +203,7 @@ class KMS {
dataKey.fill(0);
if (err) {
log.debug('error from kms',
{ implName, error: err });
return next(err);
}
log.trace('cipher created by the kms');

@@ -218,13 +217,13 @@ class KMS {
], (err, cipherBundle) => {
if (err) {
log.error('error processing cipher bundle',
{ implName, error: err });
}
return cb(err, cipherBundle);
});
}

/**
* createDecipherBundle
* @param {object} serverSideEncryptionInfo - info for decryption
* @param {number} serverSideEncryptionInfo.cryptoScheme -

@@ -244,7 +243,7 @@ class KMS {
* @callback called with (err, decipherBundle)
*/
static createDecipherBundle(serverSideEncryptionInfo, offset,
log, cb) {
if (!serverSideEncryptionInfo.masterKeyId ||
!serverSideEncryptionInfo.cipheredDataKey ||
!serverSideEncryptionInfo.cryptoScheme) {

@@ -265,7 +264,7 @@ class KMS {
log.debug('deciphering a data key');
if (err) {
log.debug('error from kms',
{ implName, error: err });
return next(err);
}
log.trace('data key deciphered by the kms');

@@ -279,7 +278,7 @@ class KMS {
plainTextDataKey.fill(0);
if (err) {
log.debug('error from kms',
{ implName, error: err });
return next(err);
}
log.trace('decipher created by the kms');

@@ -293,7 +292,7 @@ class KMS {
], (err, decipherBundle) => {
if (err) {
log.error('error processing decipher bundle',
{ implName, error: err });
return cb(err);
}
return cb(err, decipherBundle);
@@ -68,26 +68,26 @@ const acl = {
let grantWriteHeader = [];
if (resourceType === 'bucket') {
grantWriteHeader = aclUtils
.parseGrant(headers['x-amz-grant-write'], 'WRITE');
}
const grantReadACPHeader = aclUtils
.parseGrant(headers['x-amz-grant-read-acp'], 'READ_ACP');
const grantWriteACPHeader = aclUtils
.parseGrant(headers['x-amz-grant-write-acp'], 'WRITE_ACP');
const grantFullControlHeader = aclUtils
.parseGrant(headers['x-amz-grant-full-control'], 'FULL_CONTROL');
const allGrantHeaders =
[].concat(grantReadHeader, grantWriteHeader,
grantReadACPHeader, grantWriteACPHeader,
grantFullControlHeader).filter(item => item !== undefined);
if (allGrantHeaders.length === 0) {
return cb(null, currentResourceACL);
}

const usersIdentifiedByEmail = allGrantHeaders
.filter(it => it && it.userIDType.toLowerCase() === 'emailaddress');
const usersIdentifiedByGroup = allGrantHeaders
.filter(item => item && item.userIDType.toLowerCase() === 'uri');
const justEmails = usersIdentifiedByEmail.map(item => item.identifier);
const validGroups = [
constants.allAuthedUsersId,

@@ -101,7 +101,7 @@ const acl = {
}
}
const usersIdentifiedByID = allGrantHeaders
.filter(item => item && item.userIDType.toLowerCase() === 'id');
// TODO: Consider whether want to verify with Vault
// whether canonicalID is associated with existing
// account before adding to ACL

@@ -128,7 +128,7 @@ const acl = {
// If don't have to look up canonicalID's just sort grants
// and add to bucket
const revisedACL = aclUtils
.sortHeaderGrants(allGrantHeaders, resourceACL);
return cb(null, revisedACL);
}
return undefined;
@@ -8,7 +8,7 @@ const { config } = require('../../Config');
class BucketClientInterface {
constructor() {
assert(config.bucketd.bootstrap.length > 0,
'bucketd bootstrap list is empty');
const { bootstrap, log } = config.bucketd;
if (config.https) {
const { key, cert, ca } = config.https;

@@ -31,7 +31,7 @@ class BucketClientInterface {

createBucket(bucketName, bucketMD, log, cb) {
this.client.createBucket(bucketName, log.getSerializedUids(),
bucketMD.serialize(), cb);
return null;
}

@@ -59,17 +59,17 @@ class BucketClientInterface {

getRaftBuckets(raftId, log, cb) {
return this.client.getRaftBuckets(raftId, log.getSerializedUids(),
(err, data) => {
if (err) {
return cb(err);
}
return cb(null, JSON.parse(data));
});
}

putBucketAttributes(bucketName, bucketMD, log, cb) {
this.client.putBucketAttributes(bucketName, log.getSerializedUids(),
bucketMD.serialize(), cb);
return null;
}

@@ -97,7 +97,7 @@ class BucketClientInterface {

deleteObject(bucketName, objName, params, log, cb) {
this.client.deleteObject(bucketName, objName, log.getSerializedUids(),
cb, params);
return null;
}

@@ -185,8 +185,8 @@ class BucketClientInterface {
reason.msg = undefined;
respBody[implName] = {
code: 200,
message, // Provide interpreted reason msg
body: reason, // Provide analysis data
};
if (failure) {
// Setting the `error` field is how the healthCheck
@@ -13,7 +13,6 @@ const versionSep = arsenal.versioning.VersioningConstants.VersionId.Separator;
const METASTORE = '__metastore';

class BucketFileInterface {

/**
* @constructor
* @param {object} [params] - constructor params

@@ -54,7 +53,7 @@ class BucketFileInterface {
if (err) {
this.logger.fatal('error writing usersBucket ' +
'attributes to metadata',
{ error: err });
throw (errors.InternalError);
}
});

@@ -84,15 +83,15 @@ class BucketFileInterface {

createBucket(bucketName, bucketMD, log, cb) {
this.getBucketAttributes(bucketName, log, err => {
- if (err && err !== errors.NoSuchBucket) {
+ if (err && !err.is.NoSuchBucket) {
return cb(err);
}
if (err === undefined) {
return cb(errors.BucketAlreadyExists);
}
this.putBucketAttributes(bucketName,
bucketMD,
log, cb);
return undefined;
});
}

@@ -178,7 +177,7 @@ class BucketFileInterface {
errorStack: err.stack,
};
log.error('error deleting bucket',
logObj);
return cb(errors.InternalError);
}
return cb();
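The only functional change in the hunk above is the error check: err !== errors.NoSuchBucket becomes !err.is.NoSuchBucket, and the same pattern shows up later in this compare as err?.is.NoSuchBucket and err?.is.ObjNotFound. A short sketch of the idea, assuming an arsenal-style error object that carries an is map of error codes; the helper below is illustrative, not from the source:

    // Sketch only: treat "bucket does not exist" as a non-error, propagate anything else.
    function ignoreNoSuchBucket(err, cb) {
        // optional chaining also covers the "no error at all" case
        if (err?.is.NoSuchBucket) {
            return cb(null);
        }
        return cb(err || null);
    }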
@@ -146,7 +146,7 @@ const metastore = {
return cb(null, {
bucket: bucket.serialize(),
obj: JSON.stringify(
- metadata.keyMaps.get(bucketName).get(objName)
+ metadata.keyMaps.get(bucketName).get(objName),
),
});
});

@@ -184,7 +184,7 @@ const metastore = {
if (params && params.versionId) {
const baseKey = inc(formatVersionKey(objName, ''));
const vobjName = formatVersionKey(objName,
params.versionId);
metadata.keyMaps.get(bucketName).delete(vobjName);
const mst = metadata.keyMaps.get(bucketName).get(objName);
if (mst.versionId === params.versionId) {
@@ -151,7 +151,7 @@ function metadataGetObject(bucketName, objectKey, versionId, log, cb) {
(err, objMD) => {
if (err) {
log.debug('err getting object MD from metadata',
{ error: err });
return cb(err);
}
if (versionId === 'null') {

@@ -190,7 +190,7 @@ function metadataValidateBucketAndObj(params, log, callback) {
return next(errors.MethodNotAllowed, bucket);
}
if (!isBucketAuthorized(bucket, (preciseRequestType || requestType), canonicalID,
authInfo, log, request)) {
log.debug('access denied for user on bucket', { requestType });
return next(errors.AccessDenied, bucket);
}
@@ -93,24 +93,24 @@ const metadata = {
const value = typeof objVal.getValue === 'function' ?
objVal.getValue() : objVal;
client.putObject(bucketName, objName, value, params, log,
(err, data) => {
if (err) {
log.debug('error from metadata', { implName, error: err });
return cb(err);
}
if (data) {
log.debug('object version successfully put in metadata',
{ version: data });
} else {
log.debug('object successfully put in metadata');
}
return cb(err, data);
});
},

getBucketAndObjectMD: (bucketName, objName, params, log, cb) => {
log.debug('getting bucket and object from metadata',
{ database: bucketName, object: objName });
client.getBucketAndObject(bucketName, objName, params, log,
(err, data) => {
if (err) {

@@ -118,7 +118,7 @@ const metadata = {
return cb(err);
}
log.debug('bucket and object retrieved from metadata',
{ database: bucketName, object: objName });
return cb(err, data);
});
},
@@ -11,7 +11,7 @@ const locationConstraintCheck = require(
'../api/apiUtils/object/locationConstraintCheck');
const { dataStore } = require('../api/apiUtils/object/storeObject');
const prepareRequestContexts = require(
'../api/apiUtils/authorization/prepareRequestContexts');
const { decodeVersionId } = require('../api/apiUtils/object/versioning');
const locationKeysHaveChanged
= require('../api/apiUtils/object/locationKeysHaveChanged');

@@ -81,7 +81,7 @@ function _getRequestPayload(req, cb) {
payload.push(chunk);
payloadLen += chunk.length;
}).on('error', cb)
.on('end', () => cb(null, Buffer.concat(payload, payloadLen).toString()));
}

function _checkMultipleBackendRequest(request, log) {

@@ -207,19 +207,19 @@ function handleTaggingOperation(request, response, type, dataStoreVersionId,
}
}
return multipleBackendGateway.objectTagging(type, request.objectKey,
request.bucketName, objectMD, log, err => {
if (err) {
log.error(`error during object tagging: ${type}`, {
error: err,
method: 'handleTaggingOperation',
});
return callback(err);
}
const dataRetrievalInfo = {
versionId: dataStoreVersionId,
};
return _respond(response, dataRetrievalInfo, log, callback);
});
}

/*

@@ -386,26 +386,26 @@ function putMetadata(request, response, bucketInfo, objMd, log, callback) {
objectKey,
});
async.eachLimit(objMd.location, 5,
(loc, next) => data.delete(loc, log, err => {
if (err) {
log.warn('error removing old data location key', {
bucketName,
objectKey,
locationKey: loc,
error: err.message,
});
}
// do not forward the error to let other
// locations be deleted
next();
}),
() => {
log.debug('done removing old data locations', {
method: 'putMetadata',
bucketName,
objectKey,
});
});
}
return _respond(response, md, log, callback);
});

@@ -719,7 +719,7 @@ function batchDelete(request, response, log, callback) {
log.trace('batch delete locations', { locations });
return async.eachLimit(locations, 5, (loc, next) => {
data.delete(loc, log, err => {
- if (err && err.ObjNotFound) {
+ if (err?.is.ObjNotFound) {
log.info('batch delete: data location do not exist', {
method: 'batchDelete',
location: loc,

@@ -787,7 +787,7 @@ function routeBackbeat(clientIP, request, response, log) {
(backbeatRoutes[request.method] === undefined ||
backbeatRoutes[request.method][request.resourceType] === undefined ||
(backbeatRoutes[request.method][request.resourceType]
[request.query.operation] === undefined &&
useMultipleBackend));
log.addDefaultFields({
bucketName: request.bucketName,

@@ -835,49 +835,49 @@ function routeBackbeat(clientIP, request, response, log) {
}
return next(err, userInfo);
}, 's3', requestContexts),
(userInfo, next) => {
if (useMultipleBackend) {
// Bucket and object do not exist in metadata.
return next(null, null, null);
}
const mdValParams = {
bucketName: request.bucketName,
objectKey: request.objectKey,
authInfo: userInfo,
versionId,
requestType: 'ReplicateObject',
request,
};
return metadataValidateBucketAndObj(mdValParams, log, next);
},
(bucketInfo, objMd, next) => {
if (useMultipleBackend) {
return backbeatRoutes[request.method][request.resourceType]
[request.query.operation](request, response, log, next);
}
const versioningConfig = bucketInfo.getVersioningConfiguration();
if (!versioningConfig || versioningConfig.Status !== 'Enabled') {
log.debug('bucket versioning is not enabled', {
method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey,
resourceType: request.resourceType,
});
return next(errors.InvalidBucketState);
}
return backbeatRoutes[request.method][request.resourceType](
request, response, bucketInfo, objMd, log, next);
}],
err => {
if (err) {
return responseJSONBody(err, null, response, log);
}
log.debug('backbeat route response sent successfully',
{ method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey });
return undefined;
});
}
@@ -85,22 +85,22 @@ function routeMetadata(clientIP, request, response, log) {
return metadataProxy.web(request, response, { target }, err => {
if (err) {
log.error('error proxying request to metadata admin server',
{ error: err.message });
return next(errors.ServiceUnavailable);
}
return next();
});
}],
err => {
if (err) {
return responseJSONBody(err, null, response, log);
}
log.debug('metadata route response sent successfully',
{ method: request.method,
bucketName: request.bucketName,
objectKey: request.objectKey });
return undefined;
});
}
@@ -16,7 +16,7 @@ function getMetricToPush(prevObjectMD, newObjectMD) {
assert.deepStrictEqual(prevObjectMD.getAcl(), newObjectMD.getAcl());
assert.deepStrictEqual(
prevObjectMD.getTags(),
- newObjectMD.getTags()
+ newObjectMD.getTags(),
);
} catch (e) {
return 'replicateTags';
@@ -10,10 +10,18 @@ const { clientCheck } = require('./utilities/healthcheckHandler');
const _config = require('./Config').config;
const { blacklistedPrefixes } = require('../constants');
const api = require('./api/api');
- const data = require('./data/wrapper');
+ const dataWrapper = require('./data/wrapper');
+ const kms = require('./kms/wrapper');
+ const locationStorageCheck =
+     require('./api/apiUtils/object/locationStorageCheck');
+ const vault = require('./auth/vault');
+ const metadata = require('./metadata/wrapper');

const routes = arsenal.s3routes.routes;
+ const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
const websiteEndpoints = _config.websiteEndpoints;
+ let client = dataWrapper.client;
+ const implName = dataWrapper.implName;

let allEndpoints;
function updateAllEndpoints() {

@@ -21,6 +29,13 @@ function updateAllEndpoints() {
}
_config.on('rest-endpoints-update', updateAllEndpoints);
updateAllEndpoints();
+ _config.on('location-constraints-update', () => {
+     if (implName === 'multipleBackends') {
+         const clients = parseLC(_config, vault);
+         client = new MultipleBackendGateway(
+             clients, metadata, locationStorageCheck);
+     }
+ });

// redis client
let localCacheClient;

@@ -78,7 +93,15 @@ class S3Server {
allEndpoints,
websiteEndpoints,
blacklistedPrefixes,
- dataRetrievalFn: data.get,
+ dataRetrievalParams: {
+     client,
+     implName,
+     config: _config,
+     kms,
+     metadata,
+     locStorageCheckFn: locationStorageCheck,
+     vault,
+ },
};
routes(req, res, params, logger, _config);
}

@@ -144,7 +167,7 @@ class S3Server {
cleanUp() {
logger.info('server shutting down');
Promise.all(this.servers.map(server =>
- new Promise(resolve => server.close(resolve))
+ new Promise(resolve => server.close(resolve)),
)).then(() => process.exit(0));
}
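Taken together, the hunks above show the server keeping a mutable module-level data client and handing routes a dataRetrievalParams bundle instead of a single dataRetrievalFn. The sketch below only consolidates the calls visible in those hunks; anything about parseLC or MultipleBackendGateway beyond the arguments shown there is an assumption about the arsenal API, not something this diff documents.

    // Sketch only: the per-request params object carries the live data client and helpers.
    const params = {
        allEndpoints,
        websiteEndpoints,
        blacklistedPrefixes,
        dataRetrievalParams: {
            client,          // whatever the 'location-constraints-update' listener last built
            implName,
            config: _config,
            kms,
            metadata,
            locStorageCheckFn: locationStorageCheck,
            vault,
        },
    };
    routes(req, res, params, logger, _config);

Because this object appears to be assembled per request, reassigning the module-level client in the location-constraints-update listener is enough for subsequent requests to pick up the rebuilt gateway.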
@@ -42,7 +42,7 @@ const services = {
// buckets to list. By returning an empty array, the
// getService API will just respond with the user info
// without listing any buckets.
- if (err && err.NoSuchBucket) {
+ if (err?.is.NoSuchBucket) {
log.trace('no buckets found');
// If we checked the old user bucket, that means we
// already checked the new user bucket. If neither the

@@ -65,7 +65,7 @@ const services = {
});
},

/**
* Check that hashedStream.completedHash matches header contentMd5.
* @param {object} contentMD5 - content-md5 header
* @param {string} completedHash - hashed stream once completed

@@ -76,7 +76,7 @@ const services = {
checkHashMatchMD5(contentMD5, completedHash, log) {
if (contentMD5 && completedHash && contentMD5 !== completedHash) {
log.debug('contentMD5 and completedHash does not match',
{ method: 'checkHashMatchMD5', completedHash, contentMD5 });
return false;
}
return true;

@@ -236,7 +236,7 @@ const services = {
return null;
},
callback => metadata.putObjectMD(bucketName, objectKey, md,
options, log, callback),
], (err, data) => {
if (err) {
log.error('error from metadata', { error: err });

@@ -296,12 +296,12 @@ const services = {
return cb(null, res);
}
return data.batchDelete(objectMD.location, null, null,
deleteLog, err => {
if (err) {
return cb(err);
}
return cb(null, res);
});
});
}

@@ -309,15 +309,15 @@ const services = {
if (objGetInfo && objGetInfo[0]
&& config.backends.data === 'multiple') {
return multipleBackendGateway.protectAzureBlocks(bucketName,
objectKey, objGetInfo[0].dataStoreName, log, err => {
// if an error is returned, there is an MPU initiated with same
// key name as object to delete
if (err) {
return cb(err.customizeDescription('Error deleting ' +
`object on Azure: ${err.message}`));
}
return deleteMDandData();
});
}
return deleteMDandData();
},

@@ -485,13 +485,13 @@ const services = {
const multipartObjectMD = Object.assign({}, params.storedMetadata);
multipartObjectMD.completeInProgress = true;
metadata.putObjectMD(params.bucketName, longMPUIdentifier, multipartObjectMD,
{}, log, err => {
if (err) {
log.error('error from metadata', { error: err });
return cb(err);
}
return cb();
});
},

/**

@@ -555,7 +555,7 @@ const services = {
// If the MPU was initiated, the mpu bucket should exist.
const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
metadata.getBucket(mpuBucketName, log, (err, mpuBucket) => {
- if (err && err.NoSuchBucket) {
+ if (err?.is.NoSuchBucket) {
log.debug('bucket not found in metadata', { error: err,
method: 'services.metadataValidateMultipart' });
return cb(errors.NoSuchUpload);

@@ -577,7 +577,7 @@ const services = {
metadata.getObjectMD(mpuBucket.getName(), mpuOverviewKey,
{}, log, (err, storedMetadata) => {
if (err) {
- if (err.NoSuchKey) {
+ if (err.is.NoSuchKey) {
return cb(errors.NoSuchUpload);
}
log.error('error from metadata', { error: err });

@@ -671,7 +671,7 @@ const services = {
* @return {undefined}
*/
metadataStorePart(mpuBucketName, partLocations,
metaStoreParams, log, cb) {
assert.strictEqual(typeof mpuBucketName, 'string');
const { partNumber, contentMD5, size, uploadId, lastModified, splitter }
= metaStoreParams;

@@ -734,7 +734,7 @@ const services = {
listParams.splitter = constants.oldSplitter;
}
metadata.listMultipartUploads(MPUbucketName, listParams, log,
cb);
return undefined;
});
},

@@ -753,7 +753,7 @@ const services = {
assert.strictEqual(typeof bucketName, 'string');
const MPUBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
metadata.getBucket(MPUBucketName, log, (err, bucket) => {
- if (err && err.NoSuchBucket) {
+ if (err?.is.NoSuchBucket) {
log.trace('no buckets found');
const creationDate = new Date().toJSON();
const mpuBucket = new BucketInfo(MPUBucketName,
@@ -241,7 +241,7 @@ aclUtils.convertToXml = grantInfo => {
`<DisplayName>${escapeForXml(ownerInfo.displayName)}` +
'</DisplayName>',
'</Owner>',
- '<AccessControlList>'
+ '<AccessControlList>',
);

grants.forEach(grant => {

@@ -252,29 +252,29 @@ aclUtils.convertToXml = grantInfo => {
if (grant.ID) {
xml.push('<Grantee xmlns:xsi="http://www.w3.org/2001/' +
'XMLSchema-instance" xsi:type="CanonicalUser">',
- `<ID>${grant.ID}</ID>`
+ `<ID>${grant.ID}</ID>`,
);
} else if (grant.URI) {
xml.push('<Grantee xmlns:xsi="http://www.w3.org/2001/' +
'XMLSchema-instance" xsi:type="Group">',
- `<URI>${escapeForXml(grant.URI)}</URI>`
+ `<URI>${escapeForXml(grant.URI)}</URI>`,
);
}

if (grant.displayName) {
xml.push(`<DisplayName>${escapeForXml(grant.displayName)}` +
- '</DisplayName>'
+ '</DisplayName>',
);
}

xml.push('</Grantee>',
`<Permission>${grant.permission}</Permission>`,
- '</Grant>'
+ '</Grant>',
);
});

xml.push('</AccessControlList>',
- '</AccessControlPolicy>'
+ '</AccessControlPolicy>',
);

return xml.join('');

@@ -351,7 +351,7 @@ aclUtils.getCanonicalIDs = function getCanonicalIDs(acl) {
acl.WRITE,
acl.WRITE_ACP,
acl.READ,
- acl.READ_ACP
+ acl.READ_ACP,
);
const uniqueGrantees = Array.from(new Set(aclGrantees));
// grantees can be a mix of canonicalIDs and predefined groups in the form
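Every change in the aclUtils hunks above is a trailing comma added after the last argument of a multi-line xml.push(...) call. Trailing commas in function call arguments have been legal JavaScript since ES2017, so the two calls in this illustrative snippet behave identically; only the second form satisfies an "always-multiline" lint style. The permission string below is a stand-in, not taken from the source.

    // Sketch only: both calls push the same three strings.
    const xml = [];
    xml.push('</Grantee>',
        '<Permission>FULL_CONTROL</Permission>',
        '</Grant>'
    );
    xml.push('</Grantee>',
        '<Permission>FULL_CONTROL</Permission>',
        '</Grant>', // trailing comma allowed in call arguments since ES2017
    );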
@@ -70,7 +70,7 @@ function clientCheck(flightCheckOnStartUp, log, cb) {
// if there is an error from an external backend,
// only return a 500 if it is on startup
// (flightCheckOnStartUp set to true)
- obj[k].error && (flightCheckOnStartUp || !obj[k].external)
+ obj[k].error && (flightCheckOnStartUp || !obj[k].external),
);
if (fail) {
return cb(errors.InternalError, obj);

@@ -123,7 +123,7 @@ function healthcheckHandler(clientIP, req, res, log, statsClient) {
}
const deep = (req.url === '/_/healthcheck/deep');
return routeHandler(deep, req, res, log, statsClient,
healthcheckEndHandler);
}

module.exports = {
@@ -68,12 +68,12 @@ function getSystemStats() {
idle: prev.idle + cur.idle,
irq: prev.irq + cur.irq,
}), {
user: 0,
nice: 0,
sys: 0,
idle: 0,
irq: 0,
});

return {
memory: {
@@ -20,7 +20,7 @@
"homepage": "https://github.com/scality/S3#readme",
"dependencies": {
"@hapi/joi": "^17.1.0",
- "arsenal": "git+https://github.com/scality/arsenal#7.10.15",
+ "arsenal": "git+https://github.com/scality/arsenal#7.10.23",
"async": "~2.5.0",
"aws-sdk": "2.905.0",
"azure-storage": "^2.1.0",

@@ -44,7 +44,7 @@
},
"devDependencies": {
"bluebird": "^3.3.1",
- "eslint": "^2.4.0",
+ "eslint": "^8.14.0",
"eslint-config-airbnb": "^6.0.0",
"eslint-config-scality": "scality/Guidelines#7.10.2",
"ioredis": "4.9.5",
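The eslint bump from ^2.4.0 to ^8.14.0 fits the trailing commas added throughout this compare, for which the usual enforcement is ESLint's comma-dangle rule. The exact rules this repository turns on are not part of this diff, so the override below is a hypothetical .eslintrc.js sketch only; it reuses the eslint-config-scality preset listed in the devDependencies above.

    // Hypothetical override; the project's real ESLint configuration is not shown in this diff.
    module.exports = {
        extends: 'scality',
        rules: {
            // require a trailing comma on multi-line literals and call arguments
            'comma-dangle': ['error', 'always-multiline'],
        },
    };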
@@ -1,2 +1,2 @@
'use strict'; // eslint-disable-line strict
- require('./test.js'); // eslint-disable-line import/no-unresolved
+ require('./test.js');
@@ -25,12 +25,12 @@ class BucketUtility {
Bucket: bucketName,
ObjectLockEnabledForBucket: true,
}).promise()
.then(() => bucketName);
}

createMany(bucketNames) {
const promises = bucketNames.map(
- bucketName => this.createOne(bucketName)
+ bucketName => this.createOne(bucketName),
);

return Promise.all(promises);

@@ -57,7 +57,7 @@ class BucketUtility {

deleteMany(bucketNames) {
const promises = bucketNames.map(
- bucketName => this.deleteOne(bucketName)
+ bucketName => this.deleteOne(bucketName),
);

return Promise.all(promises);

@@ -87,7 +87,7 @@ class BucketUtility {
Key: object.Key,
VersionId: object.VersionId,
}).promise()
- .then(() => object)
+ .then(() => object),
)
.concat(data.Versions
.filter(object => object.Key.endsWith('/'))

@@ -98,24 +98,24 @@ class BucketUtility {
Key: object.Key,
VersionId: object.VersionId,
}).promise()
- .then(() => object)
+ .then(() => object),
- )
+ ),
)
.concat(data.DeleteMarkers
.map(object =>
this.s3.deleteObject({
Bucket: bucketName,
Key: object.Key,
VersionId: object.VersionId,
}).promise()
- .then(() => object)))
+ .then(() => object))),
- )
+ ),
);
}

emptyMany(bucketNames) {
const promises = bucketNames.map(
- bucketName => this.empty(bucketName)
+ bucketName => this.empty(bucketName),
);

return Promise.all(promises);
@@ -63,12 +63,12 @@ function methodRequest(params, callback) {
`<Code>${code}</Code>` : '';
assert(total.indexOf(message) > -1, `Expected ${message}`);
assert.deepEqual(res.statusCode, statusCode[code],
`status code expected: ${statusCode[code]}`);
}
if (headersResponse) {
Object.keys(headersResponse).forEach(key => {
assert.deepEqual(res.headers[key], headersResponse[key],
`error header: ${key}`);
});
} else {
// if no headersResponse provided, should not have these headers

@@ -77,9 +77,9 @@ function methodRequest(params, callback) {
'access-control-allow-methods',
'access-control-allow-credentials',
'vary'].forEach(key => {
assert.strictEqual(res.headers[key], undefined,
`Error: ${key} should not have value`);
});
}
if (headersOmitted) {
headersOmitted.forEach(key => {
@@ -23,7 +23,7 @@ function createEncryptedBucket(bucketParams, cb) {
if (bucketParams.CreateBucketConfiguration &&
bucketParams.CreateBucketConfiguration.LocationConstraint) {
locationConstraint = bucketParams.CreateBucketConfiguration
.LocationConstraint;
}

const prog = `${__dirname}/../../../../../bin/create_encrypted_bucket.js`;

@@ -44,23 +44,23 @@ function createEncryptedBucket(bucketParams, cb) {
}
const body = [];
const child = childProcess.spawn(args[0], args)
.on('exit', () => {
const hasSucceed = body.join('').split('\n').find(item => {
const json = safeJSONParse(item);
const test = !(json instanceof Error) && json.name === 'S3' &&
json.statusCode === 200;
if (test) {
return true;
}
return false;
});
if (!hasSucceed) {
process.stderr.write(`${body.join('')}\n`);
return cb(new Error('Cannot create encrypted bucket'));
}
return cb();
})
.on('error', cb);
child.stdout.on('data', chunk => body.push(chunk.toString()));
}
@@ -92,21 +92,21 @@ function _assertResponseHtml404(method, response, type) {
    if (method === 'HEAD') {
        if (type === '404-no-such-bucket') {
            assert.strictEqual(response.headers['x-amz-error-code'],
                'NoSuchBucket');
            // Need arsenal fixed to remove period at the end
            // so compatible with aws
            assert.strictEqual(response.headers['x-amz-error-message'],
                'The specified bucket does not exist.');
        } else if (type === '404-no-such-website-configuration') {
            assert.strictEqual(response.headers['x-amz-error-code'],
                'NoSuchWebsiteConfiguration');
            assert.strictEqual(response.headers['x-amz-error-message'],
                'The specified bucket does not have a website configuration');
        } else if (type === '404-not-found') {
            assert.strictEqual(response.headers['x-amz-error-code'],
                'NoSuchKey');
            assert.strictEqual(response.headers['x-amz-error-message'],
                'The specified key does not exist.');
        } else {
            throw new Error(`'${type}' is not a recognized 404 ` +
                'error checked in the WebsiteConfigTester.checkHTML function');

@@ -146,9 +146,9 @@ function _assertResponseHtml403(method, response, type) {
    if (method === 'HEAD') {
        if (type === '403-access-denied') {
            assert.strictEqual(response.headers['x-amz-error-code'],
                'AccessDenied');
            assert.strictEqual(response.headers['x-amz-error-message'],
                'Access Denied');
        } else if (type !== '403-retrieve-error-document') {
            throw new Error(`'${type}' is not a recognized 403 ` +
                'error checked in the WebsiteConfigTester.checkHTML function');

@@ -163,17 +163,17 @@ function _assertResponseHtml403(method, response, type) {
        ]);
        if (type === '403-retrieve-error-document') {
            _assertResponseHtml(response.body, 'h3',
                'An Error Occurred While Attempting to ' +
                'Retrieve a Custom Error Document');
            // start searching for second `ul` element after `h3` element
            const startingTag = '</h3>';
            const startIndex = response.body.indexOf(startingTag)
                + startingTag.length;
            _assertResponseHtml(response.body.slice(startIndex),
                'ul', [
                    'Code: AccessDenied',
                    'Message: Access Denied',
                ]);
        } else if (type !== '403-access-denied') {
            throw new Error(`'${type}' is not a recognized 403 ` +
                'error checked in the WebsiteConfigTester.checkHTML function');

@@ -213,9 +213,9 @@ function _assertResponseHtmlRedirect(response, type, redirectUrl, method) {
            // no need to check HTML
        }
        _assertResponseHtml(response.body, 'title',
            'Best redirect link ever');
        _assertResponseHtml(response.body, 'h1',
            'Welcome to your redirection file');
    } else {
        throw new Error(`'${type}' is not a recognized redirect type ` +
            'checked in the WebsiteConfigTester.checkHTML function');
@@ -327,29 +327,29 @@ class WebsiteConfigTester {

    static createPutBucketWebsite(s3, bucket, bucketACL, objects, done) {
        s3.createBucket({ Bucket: bucket, ACL: bucketACL },
            err => {
                if (err) {
                    return done(err);
                }
                const webConfig = new WebsiteConfigTester('index.html',
                    'error.html');
                return s3.putBucketWebsite({ Bucket: bucket,
                    WebsiteConfiguration: webConfig }, err => {
                    if (err) {
                        return done(err);
                    }
                    return async.forEachOf(objects,
                        (acl, object, next) => {
                            s3.putObject({ Bucket: bucket,
                                Key: `${object}.html`,
                                ACL: acl,
                                Body: fs.readFileSync(path.join(__dirname,
                                    `/../../test/object/websiteFiles/${object}.html`)),
                            },
                            next);
                        }, done);
                });
            });
    }

    static deleteObjectsThenBucket(s3, bucket, objects, done) {
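As the signature shows, `createPutBucketWebsite` creates the bucket, attaches an `index.html`/`error.html` website configuration, and uploads each entry of `objects` as `<name>.html` with the ACL given as that entry's value, reading the body from the `websiteFiles` fixtures. A hedged usage sketch (bucket name, ACLs, and the object map are illustrative; `s3` is assumed to be an aws-sdk S3 client already in scope):

// objects maps a fixture name (without .html) to the ACL to upload it with.
WebsiteConfigTester.createPutBucketWebsite(s3, 'website-test-bucket',
    'public-read', { index: 'public-read', error: 'public-read' }, err => {
        if (err) {
            throw err;
        }
        // Bucket is ready to serve index.html, with error.html as error page.
    });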
@@ -179,7 +179,7 @@ withV4(sigCfg => {
    assert.notStrictEqual(err, null);
    assert.strictEqual(
        err.statusCode,
-       errors.AccessDenied.code
+       errors.AccessDenied.code,
    );
}
done();
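The only substantive edit in this hunk is the trailing comma after the last argument of the multi-line `assert.strictEqual` call; trailing commas in call arguments are valid ES2017+ syntax, and presumably a comma-dangle lint rule now requires them. A tiny illustration with a hypothetical helper:

// Hypothetical helper, for illustration only: a trailing comma after the last
// argument of a multi-line call is valid ES2017+ syntax, so appending another
// argument later only changes a single line.
function logStatus(label, code) {
    console.log(label, code);
}
logStatus(
    'statusCode:',
    403,
);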
@@ -52,7 +52,7 @@ describe('aws-sdk test delete bucket lifecycle', () => {

    it('should return AccessDenied if user is not bucket owner', done => {
        otherAccountS3.deleteBucketLifecycle({ Bucket: bucket },
            err => assertError(err, 'AccessDenied', done));
    });

    it('should return no error if no lifecycle config on bucket', done => {

@@ -68,8 +68,8 @@ describe('aws-sdk test delete bucket lifecycle', () => {
        s3.deleteBucketLifecycle({ Bucket: bucket }, err => {
            assert.equal(err, null);
            s3.getBucketLifecycleConfiguration({ Bucket: bucket },
                err =>
                    assertError(err, 'NoSuchLifecycleConfiguration', done));
        });
    });
});

@@ -54,7 +54,7 @@ describe('aws-sdk test delete bucket policy', () => {

    it('should return MethodNotAllowed if user is not bucket owner', done => {
        otherAccountS3.deleteBucketPolicy({ Bucket: bucket },
            err => assertError(err, 'MethodNotAllowed', done));
    });

    it('should return no error if no policy on bucket', done => {

@@ -69,8 +69,8 @@ describe('aws-sdk test delete bucket policy', () => {
        s3.deleteBucketPolicy({ Bucket: bucket }, err => {
            assert.equal(err, null);
            s3.getBucketPolicy({ Bucket: bucket },
                err =>
                    assertError(err, 'NoSuchBucketPolicy', done));
        });
    });
});
@@ -81,7 +81,7 @@ describe('aws-node-sdk test deleteBucketReplication', () => {
        }),
        next => deleteReplicationAndCheckResponse(bucket, next),
        next => s3.getBucketReplication({ Bucket: bucket }, err => {
-           assert(errors.ReplicationConfigurationNotFoundError[err.code]);
+           assert(errors.ReplicationConfigurationNotFoundError.is[err.code]);
            return next();
        }),
    ], done));
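The assertion now goes through the error object's `is` map instead of indexing the error itself, which matches newer Arsenal error objects where the per-type boolean flags live under `.is`. A minimal sketch of the pattern, assuming an Arsenal-style error object (the shape below is illustrative, not copied from Arsenal's source):

const assert = require('assert');

// Illustrative Arsenal-style error object: the name-indexed boolean flags that
// the updated assertion reads live under the `is` property.
const replicationNotFound = {
    code: 404,
    description: 'The replication configuration was not found',
    is: { ReplicationConfigurationNotFoundError: true },
};

// `err.code` from the SDK is the error name string, so the check reads:
const err = { code: 'ReplicationConfigurationNotFoundError' };
assert(replicationNotFound.is[err.code]);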
@@ -43,14 +43,14 @@ describe('DELETE bucket cors', () => {
    describe('without existing cors configuration', () => {
        it('should return a 204 response', done => {
            s3.deleteBucketCors({ Bucket: bucketName },
                function deleteBucketCors(err) {
                    const statusCode = this.httpResponse.statusCode;
                    assert.strictEqual(statusCode, 204,
                        `Found unexpected statusCode ${statusCode}`);
                    assert.strictEqual(err, null,
                        `Found unexpected err ${err}`);
                    return done();
                });
        });
    });

@@ -62,19 +62,19 @@ describe('DELETE bucket cors', () => {

        it('should delete bucket configuration successfully', done => {
            s3.deleteBucketCors({ Bucket: bucketName },
                function deleteBucketCors(err) {
                    const statusCode = this.httpResponse.statusCode;
                    assert.strictEqual(statusCode, 204,
                        `Found unexpected statusCode ${statusCode}`);
                    assert.strictEqual(err, null,
                        `Found unexpected err ${err}`);
                    s3.getBucketCors({ Bucket: bucketName }, err => {
                        assert.strictEqual(err.code,
                            'NoSuchCORSConfiguration');
                        assert.strictEqual(err.statusCode, 404);
                        return done();
                    });
                });
        });
    });

    // Skip if AWS because AWS Node SDK raises CredentialsError

@@ -86,12 +86,12 @@ describe('DELETE bucket cors', () => {
        itSkipIfAWS('should return AccessDenied if user is not bucket' +
            'owner', done => {
            otherAccountS3.deleteBucketCors({ Bucket: bucketName },
                err => {
                    assert(err);
                    assert.strictEqual(err.code, 'AccessDenied');
                    assert.strictEqual(err.statusCode, 403);
                    return done();
                });
        });
    });
});

@@ -59,15 +59,15 @@ describe('DELETE bucket website', () => {
        });

        it('should return AccessDenied if user is not bucket owner',
            done => {
                otherAccountS3.deleteBucketWebsite({ Bucket: bucketName },
                    err => {
                        assert(err);
                        assert.strictEqual(err.code, 'AccessDenied');
                        assert.strictEqual(err.statusCode, 403);
                        return done();
                    });
            });
    });
});
});
@@ -129,7 +129,7 @@ const tests = [
        Body: '{}' },
    { Bucket, Key:
        '!exclamationPointObjTitle/!exclamationPointObjTitle',
        Body: '{}' },
    { Bucket, Key: '-dashObjTitle/' },
    { Bucket, Key: '-dashObjTitle/objTitleA', Body: '{}' },
    { Bucket, Key: '-dashObjTitle/-dashObjTitle', Body: '{}' },

@@ -157,7 +157,7 @@ const tests = [
        Body: '{}' },
    { Bucket, Key:
        '山chineseMountainObjTitle/山chineseMountainObjTitle',
        Body: '{}' },
    { Bucket, Key: 'àaGraveLowerCaseObjTitle' },
    { Bucket, Key: 'àaGraveLowerCaseObjTitle/objTitleA',
        Body: '{}' },

@@ -294,17 +294,17 @@ describe('GET Bucket - AWS.S3.listObjects', () => {
    before(done => {
        bucketUtil = new BucketUtility();
        bucketUtil.createRandom(1)
            .then(created => {
                bucketName = created;
                done();
            })
            .catch(done);
    });

    after(done => {
        bucketUtil.deleteOne(bucketName)
            .then(() => done())
            .catch(done);
    });

    it('should return 403 and AccessDenied on a private bucket', done => {

@@ -326,11 +326,11 @@ describe('GET Bucket - AWS.S3.listObjects', () => {
    before(done => {
        bucketUtil = new BucketUtility('default', sigCfg);
        bucketUtil.createRandom(1)
            .then(created => {
                bucketName = created;
                done();
            })
            .catch(done);
    });

    after(done => {

@@ -492,57 +492,57 @@ describe('GET Bucket - AWS.S3.listObjects', () => {

    ['&', '"quot', '\'apos', '<lt', '>gt'].forEach(k => {
        it(`should list objects with key ${k} as ContinuationToken`,
            done => {
                const s3 = bucketUtil.s3;
                const Bucket = bucketName;
                const objects = [{ Bucket, Key: k }];

                Promise
                    .mapSeries(objects, param => s3.putObject(param).promise())
                    .then(() => s3.listObjectsV2({
                        Bucket,
                        ContinuationToken: generateToken(k),
                    }).promise())
                    .then(data => {
                        const isValidResponse = tv4.validate(data,
                            bucketSchemaV2);
                        if (!isValidResponse) {
                            throw new Error(tv4.error);
                        }
                        return data;
                    }).then(data => {
                        assert.deepStrictEqual(
                            decryptToken(data.ContinuationToken), k);
                        done();
                    })
                    .catch(done);
            });
    });

    ['&', '"quot', '\'apos', '<lt', '>gt'].forEach(k => {
        it(`should list objects with key ${k} as NextContinuationToken`,
            done => {
                const s3 = bucketUtil.s3;
                const Bucket = bucketName;
                const objects = [{ Bucket, Key: k }, { Bucket, Key: 'zzz' }];
                Promise
                    .mapSeries(objects, param => s3.putObject(param).promise())
                    .then(() => s3.listObjectsV2({ Bucket, MaxKeys: 1,
                        Delimiter: 'foo' }).promise())
                    .then(data => {
                        const isValidResponse = tv4.validate(data,
                            bucketSchemaV2);
                        if (!isValidResponse) {
                            throw new Error(tv4.error);
                        }
                        return data;
                    }).then(data => {
                        assert.strictEqual(
                            decryptToken(data.NextContinuationToken), k);
                        done();
                    })
                    .catch(done);
            });
    });
});
});
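These listing tests use keys made of XML-special characters, so the opaque continuation token has to round-trip them unchanged. A minimal sketch of that round trip with stand-in helpers (CloudServer's actual `generateToken`/`decryptToken` may use a different, possibly encrypted, encoding):

const assert = require('assert');

// Stand-in token helpers for illustration only.
const generateToken = key => Buffer.from(key, 'utf8').toString('base64');
const decryptToken = token => Buffer.from(token, 'base64').toString('utf8');

// A key full of XML-special characters survives the encode/decode round trip.
['&', '"quot', '\'apos', '<lt', '>gt'].forEach(key => {
    assert.strictEqual(decryptToken(generateToken(key)), key);
});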
@@ -56,7 +56,7 @@ describe('aws-sdk test get bucket encryption', () => {

    it('should include KMSMasterKeyID if user has configured a custom master key', done => {
        setEncryptionInfo({ cryptoScheme: 1, algorithm: 'aws:kms', masterKeyId: '12345',
            configuredMasterKeyId: '54321', mandatory: true }, err => {
            assert.ifError(err);
            s3.getBucketEncryption({ Bucket: bucketName }, (err, res) => {
                assert.ifError(err);
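For reference, GetBucketEncryption reports the default-encryption rule in a fixed response shape; with a customer-configured key, the SSE-KMS rule carries the key id. A sketch of that shape (values are illustrative, mirroring the ids used in this test):

// Shape of an aws-sdk GetBucketEncryption response for an SSE-KMS default
// encryption rule; the key id below is a placeholder, not a real ARN.
const expected = {
    ServerSideEncryptionConfiguration: {
        Rules: [{
            ApplyServerSideEncryptionByDefault: {
                SSEAlgorithm: 'aws:kms',
                KMSMasterKeyID: '54321',
            },
        }],
    },
};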
@@ -44,7 +44,7 @@ describe('aws-sdk test get bucket lifecycle', () => {

    it('should return AccessDenied if user is not bucket owner', done => {
        otherAccountS3.getBucketLifecycleConfiguration({ Bucket: bucket },
            err => assertError(err, 'AccessDenied', done));
    });

    it('should return NoSuchLifecycleConfiguration error if no lifecycle ' +

@@ -68,20 +68,20 @@ describe('aws-sdk test get bucket lifecycle', () => {
        }, err => {
            assert.equal(err, null, `Err putting lifecycle config: ${err}`);
            s3.getBucketLifecycleConfiguration({ Bucket: bucket },
                (err, res) => {
                    assert.equal(err, null, 'Error getting lifecycle config: ' +
                        `${err}`);
                    assert.strictEqual(res.Rules.length, 1);
                    assert.deepStrictEqual(res.Rules[0], {
                        Expiration: { Days: 1 },
                        ID: 'test-id',
                        Prefix: '',
                        Status: 'Enabled',
                        Transitions: [],
                        NoncurrentVersionTransitions: [],
                    });
                    done();
                });
        }));

    it('should get bucket lifecycle config with filter prefix', done =>

@@ -98,71 +98,71 @@ describe('aws-sdk test get bucket lifecycle', () => {
        }, err => {
            assert.equal(err, null, `Err putting lifecycle config: ${err}`);
            s3.getBucketLifecycleConfiguration({ Bucket: bucket },
                (err, res) => {
                    assert.equal(err, null, 'Error getting lifecycle config: ' +
                        `${err}`);
                    assert.strictEqual(res.Rules.length, 1);
                    assert.deepStrictEqual(res.Rules[0], {
                        Expiration: { Days: 1 },
                        ID: 'test-id',
                        Filter: { Prefix: '' },
                        Status: 'Enabled',
                        Transitions: [],
                        NoncurrentVersionTransitions: [],
                    });
                    done();
                });
        }));

    it('should get bucket lifecycle config with filter prefix and tags',
        done =>
        s3.putBucketLifecycleConfiguration({
            Bucket: bucket,
            LifecycleConfiguration: {
                Rules: [{
                    ID: 'test-id',
                    Status: 'Enabled',
                    Filter: {
                        And: {
                            Prefix: '',
                            Tags: [
                                {
                                    Key: 'key',
                                    Value: 'value',
                                },
                            ],
                        },
                    },
                    Expiration: { Days: 1 },
                }],
            },
        }, err => {
            assert.equal(err, null, `Err putting lifecycle config: ${err}`);
            s3.getBucketLifecycleConfiguration({ Bucket: bucket },
                (err, res) => {
                    assert.equal(err, null, 'Error getting lifecycle config: ' +
                        `${err}`);
                    assert.strictEqual(res.Rules.length, 1);
                    assert.deepStrictEqual(res.Rules[0], {
                        Expiration: { Days: 1 },
                        ID: 'test-id',
                        Filter: {
                            And: {
                                Prefix: '',
                                Tags: [
                                    {
                                        Key: 'key',
                                        Value: 'value',
                                    },
                                ],
                            },
                        },
                        Status: 'Enabled',
                        Transitions: [],
                        NoncurrentVersionTransitions: [],
                    });
                    done();
                });
        }));
});
});
@@ -48,10 +48,10 @@ describe('aws-sdk test get bucket notification', () => {

    it('should return AccessDenied if user is not bucket owner', done => {
        otherAccountS3.getBucketNotificationConfiguration({ Bucket: bucket },
            err => {
                assertError(err, 'AccessDenied');
                done();
            });
    });

    it('should not return an error if no notification configuration ' +

@@ -69,11 +69,11 @@ describe('aws-sdk test get bucket notification', () => {
        }, err => {
            assert.equal(err, null, `Err putting notification config: ${err}`);
            s3.getBucketNotificationConfiguration({ Bucket: bucket },
                (err, res) => {
                    assert.equal(err, null, `Error getting notification config: ${err}`);
                    assert.deepStrictEqual(res.QueueConfigurations, notificationConfig.QueueConfigurations);
                    done();
                });
        });
    });
});

@@ -55,15 +55,15 @@ describe('aws-sdk test get bucket policy', () => {

    it('should return MethodNotAllowed if user is not bucket owner', done => {
        otherAccountS3.getBucketPolicy({ Bucket: bucket },
            err => assertError(err, 'MethodNotAllowed', done));
    });

    it('should return NoSuchBucketPolicy error if no policy put to bucket',
        done => {
            s3.getBucketPolicy({ Bucket: bucket }, err => {
                assertError(err, 'NoSuchBucketPolicy', done);
            });
        });

    it('should get bucket policy', done => {
        s3.putBucketPolicy({

@@ -72,13 +72,13 @@ describe('aws-sdk test get bucket policy', () => {
        }, err => {
            assert.equal(err, null, `Err putting bucket policy: ${err}`);
            s3.getBucketPolicy({ Bucket: bucket },
                (err, res) => {
                    const parsedRes = JSON.parse(res.Policy);
                    assert.equal(err, null, 'Error getting bucket policy: ' +
                        `${err}`);
                    assert.deepStrictEqual(parsedRes.Statement[0], expectedPolicy);
                    done();
                });
        });
    });
});
@@ -45,7 +45,7 @@ describe('aws-node-sdk test getBucketReplication', () => {
    it("should return 'ReplicationConfigurationNotFoundError' if bucket does " +
        'not have a replication configuration', done =>
        s3.getBucketReplication({ Bucket: bucket }, err => {
-           assert(errors.ReplicationConfigurationNotFoundError[err.code]);
+           assert(errors.ReplicationConfigurationNotFoundError.is[err.code]);
            return done();
        }));
Some files were not shown because too many files have changed in this diff.