Compare commits

15 Commits

development ... user/jbert

Author | SHA1 | Date |
---|---|---|
Jordi Bertran de Balanda | ac852727ba | |
Jordi Bertran de Balanda | 151fde6a35 | |
Jordi Bertran de Balanda | 2dc3ac6bb6 | |
Jordi Bertran de Balanda | 476da8ed62 | |
Jordi Bertran de Balanda | 27e2e2393c | |
Jordi Bertran de Balanda | ab06f6f7fb | |
Jordi Bertran de Balanda | 62d66e89ac | |
Jordi Bertran de Balanda | 1fc0efeb78 | |
Jordi Bertran de Balanda | 8c2e95e31b | |
Jordi Bertran de Balanda | e8bd68619d | |
Jordi Bertran de Balanda | 721cf4eb84 | |
Jordi Bertran de Balanda | f734af0409 | |
Jordi Bertran de Balanda | a0a8ee9002 | |
Jordi Bertran de Balanda | c773859741 | |
Jordi Bertran de Balanda | d4344d1068 | |
@@ -0,0 +1,7 @@
{
    "extends": ["scality"],
    "plugins": ["jest"],
    "env": {
        "jest/globals": true
    }
}
@@ -0,0 +1,8 @@
module.exports = {
    presets: [
        ['@babel/preset-env', { targets: { node: 'current' } }],
        '@babel/preset-typescript',
    ],
    plugins: ['add-module-exports'],
};

@@ -745,7 +745,7 @@
        "code": 404
    },
    "DBAPINotReady": {
        "message": "DBAPINotReady",
        "description": "DBAPINotReady",
        "code": 500
    },
    "NotEnoughMapsInConfig:": {
@@ -1,16 +1,15 @@
module.exports = {
export default {
    auth: require('./lib/auth/auth'),
    constants: require('./lib/constants'),
    db: require('./lib/db'),
    errors: require('./lib/errors.js'),
    errors: require('./lib/errors'),
    errorUtils: require('./lib/errorUtils'),
    shuffle: require('./lib/shuffle'),
    stringHash: require('./lib/stringHash'),
    ipCheck: require('./lib/ipCheck'),
    jsutil: require('./lib/jsutil'),
    https: {
        ciphers: require('./lib/https/ciphers.js'),
        dhparam: require('./lib/https/dh2048.js'),
        ciphers: require('./lib/https/ciphers'),
        dhparam: require('./lib/https/dh2048'),
    },
    algorithms: {
        list: require('./lib/algos/list/exportAlgos'),

@@ -25,23 +24,23 @@ module.exports = {
        },
    },
    policies: {
        evaluators: require('./lib/policyEvaluator/evaluator.js'),
        evaluators: require('./lib/policyEvaluator/evaluator'),
        validateUserPolicy: require('./lib/policy/policyValidator')
            .validateUserPolicy,
        evaluatePrincipal: require('./lib/policyEvaluator/principal'),
        RequestContext: require('./lib/policyEvaluator/RequestContext.js'),
        RequestContext: require('./lib/policyEvaluator/RequestContext'),
        requestUtils: require('./lib/policyEvaluator/requestUtils'),
        actionMaps: require('./lib/policyEvaluator/utils/actionMaps'),
    },
    Clustering: require('./lib/Clustering'),
    testing: {
        matrix: require('./lib/testing/matrix.js'),
        matrix: require('./lib/testing/matrix'),
    },
    versioning: {
        VersioningConstants: require('./lib/versioning/constants.js')
        VersioningConstants: require('./lib/versioning/constants')
            .VersioningConstants,
        Version: require('./lib/versioning/Version.js').Version,
        VersionID: require('./lib/versioning/VersionID.js'),
        Version: require('./lib/versioning/Version').Version,
        VersionID: require('./lib/versioning/VersionID'),
    },
    network: {
        http: {

@@ -57,8 +56,8 @@ module.exports = {
        probe: {
            ProbeServer: require('./lib/network/probe/ProbeServer'),
            HealthProbeServer:
                require('./lib/network/probe/HealthProbeServer.js'),
            Utils: require('./lib/network/probe/Utils.js'),
                require('./lib/network/probe/HealthProbeServer'),
            Utils: require('./lib/network/probe/Utils'),
        },
        kmip: require('./lib/network/kmip'),
        kmipClient: require('./lib/network/kmip/Client'),
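For orientation, a minimal sketch of what a consumer of this entry point might look like once it moves from `module.exports` to `export default`. The package name `arsenal` and the consumer code are assumptions for illustration, not part of this change.

```ts
// Hypothetical consumer of the migrated entry point (package name assumed).
import arsenal from 'arsenal';

const { algorithms, versioning } = arsenal;
const { Delimiter } = algorithms.list;
const vFormat = versioning.VersioningConstants.BucketVersioningKeyFormat.v0;

// Partial listing parameters for brevity; see DelimiterParams later in this diff.
const listing = new Delimiter({ delimiter: '/', maxKeys: 100 } as any, console, vFormat);
```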
@@ -1,6 +1,7 @@
'use strict'; // eslint-disable-line

const cluster = require('cluster');
import * as cluster from 'cluster';


class Clustering {
    /**
@@ -12,20 +13,31 @@ class Clustering {
     * releasing resources
     * @return {Clustering} itself
     */
    constructor(size, logger, shutdownTimeout) {
        this._size = size;
    size: number;
    shutdownTimeout: number;
    logger: any; // TODO logger ???
    shutdown: boolean;
    workers: cluster.Worker[];
    workersTimeout: NodeJS.Timeout[]; // TODO array of worker timeouts
    workersStatus: number[];
    status: number;
    exitCb?: Function;
    index?: number;

    constructor(size: number, logger: any, shutdownTimeout = 5000) {
        if (size < 1) {
            throw new Error('Cluster size must be greater than or equal to 1');
        }
        this._shutdownTimeout = shutdownTimeout || 5000;
        this._logger = logger;
        this._shutdown = false;
        this._workers = new Array(size).fill(undefined);
        this._workersTimeout = new Array(size).fill(undefined);
        this._workersStatus = new Array(size).fill(undefined);
        this._status = 0;
        this._exitCb = undefined; // Exit callback
        this._index = undefined;
        this.size = size;
        this.shutdownTimeout = shutdownTimeout || 5000;
        this.logger = logger;
        this.shutdown = false;
        this.workers = new Array(size).fill(undefined);
        this.workersTimeout = new Array(size).fill(undefined);
        this.workersStatus = new Array(size).fill(undefined);
        this.status = 0;
        this.exitCb = undefined; // Exit callback
        this.index = undefined;
    }

    /**
@ -34,23 +46,23 @@ class Clustering {
|
|||
* @private
|
||||
* @return {undefined}
|
||||
*/
|
||||
_afterStop() {
|
||||
_afterStop(): undefined {
|
||||
// Assuming all workers shut down gracefully
|
||||
this._status = 0;
|
||||
const size = this._size;
|
||||
this.status = 0;
|
||||
const size = this.size;
|
||||
for (let i = 0; i < size; ++i) {
|
||||
// If the process return an error code or killed by a signal,
|
||||
// set the status
|
||||
if (typeof this._workersStatus[i] === 'number') {
|
||||
this._status = this._workersStatus[i];
|
||||
if (typeof this.workersStatus[i] === 'number') {
|
||||
this.status = this.workersStatus[i];
|
||||
break;
|
||||
} else if (typeof this._workersStatus[i] === 'string') {
|
||||
this._status = 1;
|
||||
} else if (typeof this.workersStatus[i] === 'string') {
|
||||
this.status = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (this._exitCb) {
|
||||
return this._exitCb(this);
|
||||
if (this.exitCb) {
|
||||
return this.exitCb(this);
|
||||
}
|
||||
return process.exit(this.getStatus());
|
||||
}
|
||||
|
@ -64,45 +76,47 @@ class Clustering {
|
|||
* @param {string} signal - Exit signal
|
||||
* @return {undefined}
|
||||
*/
|
||||
_workerExited(worker, i, code, signal) {
|
||||
_workerExited(
|
||||
worker: cluster.Worker, index: number, code: number, signal: number
|
||||
): undefined {
|
||||
// If the worker:
|
||||
// - was killed by a signal
|
||||
// - return an error code
|
||||
// - or just stopped
|
||||
if (signal) {
|
||||
this._logger.info('Worker killed by signal', {
|
||||
this.logger.info('Worker killed by signal', {
|
||||
signal,
|
||||
id: i,
|
||||
id: index,
|
||||
childPid: worker.process.pid,
|
||||
});
|
||||
this._workersStatus[i] = signal;
|
||||
this.workersStatus[index] = signal;
|
||||
} else if (code !== 0) {
|
||||
this._logger.error('Worker exit with code', {
|
||||
this.logger.error('Worker exit with code', {
|
||||
code,
|
||||
id: i,
|
||||
id: index,
|
||||
childPid: worker.process.pid,
|
||||
});
|
||||
this._workersStatus[i] = code;
|
||||
this.workersStatus[index] = code;
|
||||
} else {
|
||||
this._logger.info('Worker shutdown gracefully', {
|
||||
id: i,
|
||||
this.logger.info('Worker shutdown gracefully', {
|
||||
id: index,
|
||||
childPid: worker.process.pid,
|
||||
});
|
||||
this._workersStatus[i] = undefined;
|
||||
this.workersStatus[index] = undefined;
|
||||
}
|
||||
this._workers[i] = undefined;
|
||||
if (this._workersTimeout[i]) {
|
||||
clearTimeout(this._workersTimeout[i]);
|
||||
this._workersTimeout[i] = undefined;
|
||||
this.workers[index] = undefined;
|
||||
if (this.workersTimeout[index]) {
|
||||
clearTimeout(this.workersTimeout[index]);
|
||||
this.workersTimeout[index] = undefined;
|
||||
}
|
||||
// If we don't trigger the stop method, the watchdog
|
||||
// will autorestart the worker
|
||||
if (this._shutdown === false) {
|
||||
return process.nextTick(() => this.startWorker(i));
|
||||
if (this.shutdown === false) {
|
||||
return process.nextTick(() => this.startWorker(index));
|
||||
}
|
||||
// Check if a worker is still running
|
||||
if (!this._workers.every(cur => cur === undefined)) {
|
||||
return undefined;
|
||||
if (!this.workers.every(cur => cur === undefined)) {
|
||||
return;
|
||||
}
|
||||
return this._afterStop();
|
||||
}
|
||||
|
@ -113,26 +127,26 @@ class Clustering {
|
|||
* @param {number} i Index of the starting worker
|
||||
* @return {undefined}
|
||||
*/
|
||||
startWorker(i) {
|
||||
startWorker(index: number): undefined {
|
||||
if (!cluster.isMaster) {
|
||||
return;
|
||||
}
|
||||
// Fork a new worker
|
||||
this._workers[i] = cluster.fork();
|
||||
this.workers[index] = cluster.fork();
|
||||
// Listen for message from the worker
|
||||
this._workers[i].on('message', msg => {
|
||||
this.workers[index].on('message', msg => {
|
||||
// If the worker is ready, send it its id
|
||||
if (msg === 'ready') {
|
||||
this._workers[i].send({ msg: 'setup', id: i });
|
||||
this.workers[index].send({ msg: 'setup', id: index });
|
||||
}
|
||||
});
|
||||
this._workers[i].on('exit', (code, signal) =>
|
||||
this._workerExited(this._workers[i], i, code, signal));
|
||||
this.workers[index].on('exit', (code, signal) =>
|
||||
this._workerExited(this.workers[index], index, code, signal));
|
||||
// Trigger when the worker was started
|
||||
this._workers[i].on('online', () => {
|
||||
this._logger.info('Worker started', {
|
||||
id: i,
|
||||
childPid: this._workers[i].process.pid,
|
||||
this.workers[index].on('online', () => {
|
||||
this.logger.info('Worker started', {
|
||||
id: index,
|
||||
childPid: this.workers[index].process.pid,
|
||||
});
|
||||
});
|
||||
}
|
||||
|
@ -143,8 +157,8 @@ class Clustering {
|
|||
* @param {function} cb - Callback(Clustering, [exitSignal])
|
||||
* @return {Clustering} Itself
|
||||
*/
|
||||
onExit(cb) {
|
||||
this._exitCb = cb;
|
||||
onExit(cb: Function): Clustering {
|
||||
this.exitCb = cb;
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -155,21 +169,21 @@ class Clustering {
|
|||
* @param {function} cb - Callback to run the worker
|
||||
* @return {Clustering} itself
|
||||
*/
|
||||
start(cb) {
|
||||
start(cb: Function): Clustering {
|
||||
process.on('SIGINT', () => this.stop('SIGINT'));
|
||||
process.on('SIGHUP', () => this.stop('SIGHUP'));
|
||||
process.on('SIGQUIT', () => this.stop('SIGQUIT'));
|
||||
process.on('SIGTERM', () => this.stop('SIGTERM'));
|
||||
process.on('SIGPIPE', () => {});
|
||||
process.on('exit', (code, signal) => {
|
||||
if (this._exitCb) {
|
||||
this._status = code || 0;
|
||||
return this._exitCb(this, signal);
|
||||
if (this.exitCb) {
|
||||
this.status = code || 0;
|
||||
return this.exitCb(this, signal);
|
||||
}
|
||||
return process.exit(code || 0);
|
||||
});
|
||||
process.on('uncaughtException', err => {
|
||||
this._logger.fatal('caught error', {
|
||||
this.logger.fatal('caught error', {
|
||||
error: err.message,
|
||||
stack: err.stack.split('\n').map(str => str.trim()),
|
||||
});
|
||||
|
@ -180,7 +194,7 @@ class Clustering {
|
|||
// know the id of the slave cluster
|
||||
process.on('message', msg => {
|
||||
if (msg.msg === 'setup') {
|
||||
this._index = msg.id;
|
||||
this.index = msg.id;
|
||||
cb(this);
|
||||
}
|
||||
});
|
||||
|
@ -188,7 +202,7 @@ class Clustering {
|
|||
// the worker has started
|
||||
process.send('ready');
|
||||
} else {
|
||||
for (let i = 0; i < this._size; ++i) {
|
||||
for (let i = 0; i < this.size; ++i) {
|
||||
this.startWorker(i);
|
||||
}
|
||||
}
|
||||
|
@ -200,8 +214,8 @@ class Clustering {
|
|||
*
|
||||
* @return {Cluster.Worker[]} Workers
|
||||
*/
|
||||
getWorkers() {
|
||||
return this._workers;
|
||||
getWorkers(): cluster.Worker[] {
|
||||
return this.workers;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -209,8 +223,8 @@ class Clustering {
|
|||
*
|
||||
* @return {number} Status code
|
||||
*/
|
||||
getStatus() {
|
||||
return this._status;
|
||||
getStatus(): number {
|
||||
return this.status;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -218,8 +232,8 @@ class Clustering {
|
|||
*
|
||||
* @return {boolean} - True if master, false otherwise
|
||||
*/
|
||||
isMaster() {
|
||||
return this._index === undefined;
|
||||
isMaster(): boolean {
|
||||
return this.index === undefined;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -227,8 +241,8 @@ class Clustering {
|
|||
*
|
||||
* @return {number|undefined} Worker index, undefined if it's master
|
||||
*/
|
||||
getIndex() {
|
||||
return this._index;
|
||||
getIndex(): number {
|
||||
return this.index;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -237,22 +251,22 @@ class Clustering {
|
|||
* @param {string} signal - Set internally when processes killed by signal
|
||||
* @return {undefined}
|
||||
*/
|
||||
stop(signal) {
|
||||
stop(signal: string): undefined {
|
||||
if (!cluster.isMaster) {
|
||||
if (this._exitCb) {
|
||||
return this._exitCb(this, signal);
|
||||
if (this.exitCb) {
|
||||
return this.exitCb(this, signal);
|
||||
}
|
||||
return process.exit(0);
|
||||
}
|
||||
this._shutdown = true;
|
||||
return this._workers.forEach((worker, i) => {
|
||||
this.shutdown = true;
|
||||
return this.workers.forEach((worker, index) => {
|
||||
if (!worker) {
|
||||
return undefined;
|
||||
}
|
||||
this._workersTimeout[i] = setTimeout(() => {
|
||||
this.workersTimeout[index] = setTimeout(() => {
|
||||
// Kill the worker if the SIGTERM was ignored or takes too long
|
||||
process.kill(worker.process.pid, 'SIGKILL');
|
||||
}, this._shutdownTimeout);
|
||||
}, this.shutdownTimeout);
|
||||
// Send SIGTERM to the process, allowing it to release resources
|
||||
// and save some states
|
||||
return process.kill(worker.process.pid, 'SIGTERM');
|
||||
|
@@ -260,4 +274,4 @@ class Clustering {
    }
}

module.exports = Clustering;
export default Clustering;
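A short usage sketch of the migrated Clustering class, based only on the constructor and methods visible in this diff (start, onExit, isMaster, getIndex, getStatus); the logger object and its shape are assumptions.

```ts
// Usage sketch for the migrated Clustering class; the logger shape is assumed.
import Clustering from './lib/Clustering';

const logger = { info: console.log, error: console.error, fatal: console.error };
const clustering = new Clustering(4, logger, 5000);

clustering.onExit(c => {
    if (c.isMaster()) {
        logger.info('all workers stopped', { status: c.getStatus() });
    }
});

clustering.start(c => {
    // Runs in each worker once the 'setup' message has delivered its index.
    logger.info('worker ready', { index: c.getIndex() });
});
```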
@@ -1,4 +1,4 @@
const assert = require('assert');
import { strict as assert } from 'assert';

/**
 * @class
@ -12,9 +12,15 @@ class LRUCache {
|
|||
* @param {number} maxEntries - maximum number of entries kept in
|
||||
* the cache
|
||||
*/
|
||||
constructor(maxEntries) {
|
||||
maxEntries: number;
|
||||
private entryCount: number;
|
||||
private entryMap: object;
|
||||
private lruHead: any; // TODO lruTrail?
|
||||
private lruTail: any; // TODO lruTrail?
|
||||
|
||||
constructor(maxEntries: number) {
|
||||
assert(maxEntries >= 1);
|
||||
this._maxEntries = maxEntries;
|
||||
this.maxEntries = maxEntries;
|
||||
this.clear();
|
||||
}
|
||||
|
||||
|
@ -27,8 +33,8 @@ class LRUCache {
|
|||
* @return {boolean} true if the cache contained an entry with
|
||||
* this key, false if it did not
|
||||
*/
|
||||
add(key, value) {
|
||||
let entry = this._entryMap[key];
|
||||
add(key: string, value: object): boolean {
|
||||
let entry = this.entryMap[key];
|
||||
if (entry) {
|
||||
entry.value = value;
|
||||
// make the entry the most recently used by re-pushing it
|
||||
|
@ -37,15 +43,15 @@ class LRUCache {
|
|||
this._lruPushEntry(entry);
|
||||
return true;
|
||||
}
|
||||
if (this._entryCount === this._maxEntries) {
|
||||
if (this.entryCount === this.maxEntries) {
|
||||
// if the cache is already full, abide by the LRU strategy
|
||||
// and remove the least recently used entry from the cache
|
||||
// before pushing the new entry
|
||||
this._removeEntry(this._lruTail);
|
||||
this._removeEntry(this.lruTail);
|
||||
}
|
||||
entry = { key, value };
|
||||
this._entryMap[key] = entry;
|
||||
this._entryCount += 1;
|
||||
this.entryMap[key] = entry;
|
||||
this.entryCount += 1;
|
||||
this._lruPushEntry(entry);
|
||||
return false;
|
||||
}
|
||||
|
@ -59,8 +65,8 @@ class LRUCache {
|
|||
* exists in the cache, or undefined if not found - either if the
|
||||
* key was never added or if it has been evicted from the cache.
|
||||
*/
|
||||
get(key) {
|
||||
const entry = this._entryMap[key];
|
||||
get(key: string): object | undefined {
|
||||
const entry = this.entryMap[key];
|
||||
if (entry) {
|
||||
// make the entry the most recently used by re-pushing it
|
||||
// to the head of the LRU list
|
||||
|
@ -79,8 +85,8 @@ class LRUCache {
|
|||
* there was no entry with this key in the cache - either if the
|
||||
* key was never added or if it has been evicted from the cache.
|
||||
*/
|
||||
remove(key) {
|
||||
const entry = this._entryMap[key];
|
||||
remove(key: string): boolean {
|
||||
const entry = this.entryMap[key];
|
||||
if (entry) {
|
||||
this._removeEntry(entry);
|
||||
return true;
|
||||
|
@ -93,8 +99,8 @@ class LRUCache {
|
|||
*
|
||||
* @return {number} current number of cached entries
|
||||
*/
|
||||
count() {
|
||||
return this._entryCount;
|
||||
count(): number {
|
||||
return this.entryCount;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -102,11 +108,11 @@ class LRUCache {
|
|||
*
|
||||
* @return {undefined}
|
||||
*/
|
||||
clear() {
|
||||
this._entryMap = {};
|
||||
this._entryCount = 0;
|
||||
this._lruHead = null;
|
||||
this._lruTail = null;
|
||||
clear(): undefined {
|
||||
this.entryMap = {};
|
||||
this.entryCount = 0;
|
||||
this.lruHead = null;
|
||||
this.lruTail = null;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -116,16 +122,16 @@ class LRUCache {
|
|||
* @param {object} entry - entry to push
|
||||
* @return {undefined}
|
||||
*/
|
||||
_lruPushEntry(entry) {
|
||||
_lruPushEntry(entry: object): undefined {
|
||||
/* eslint-disable no-param-reassign */
|
||||
entry._lruNext = this._lruHead;
|
||||
entry._lruNext = this.lruHead;
|
||||
entry._lruPrev = null;
|
||||
if (this._lruHead) {
|
||||
this._lruHead._lruPrev = entry;
|
||||
if (this.lruHead) {
|
||||
this.lruHead._lruPrev = entry;
|
||||
}
|
||||
this._lruHead = entry;
|
||||
if (!this._lruTail) {
|
||||
this._lruTail = entry;
|
||||
this.lruHead = entry;
|
||||
if (!this.lruTail) {
|
||||
this.lruTail = entry;
|
||||
}
|
||||
/* eslint-enable no-param-reassign */
|
||||
}
|
||||
|
@ -136,17 +142,17 @@ class LRUCache {
|
|||
* @param {object} entry - entry to remove
|
||||
* @return {undefined}
|
||||
*/
|
||||
_lruRemoveEntry(entry) {
|
||||
_lruRemoveEntry(entry): undefined {
|
||||
/* eslint-disable no-param-reassign */
|
||||
if (entry._lruPrev) {
|
||||
entry._lruPrev._lruNext = entry._lruNext;
|
||||
} else {
|
||||
this._lruHead = entry._lruNext;
|
||||
this.lruHead = entry._lruNext;
|
||||
}
|
||||
if (entry._lruNext) {
|
||||
entry._lruNext._lruPrev = entry._lruPrev;
|
||||
} else {
|
||||
this._lruTail = entry._lruPrev;
|
||||
this.lruTail = entry._lruPrev;
|
||||
}
|
||||
/* eslint-enable no-param-reassign */
|
||||
}
|
||||
|
@ -157,11 +163,11 @@ class LRUCache {
|
|||
* @param {object} entry - cache entry to remove
|
||||
* @return {undefined}
|
||||
*/
|
||||
_removeEntry(entry) {
|
||||
_removeEntry(entry: object): undefined {
|
||||
this._lruRemoveEntry(entry);
|
||||
delete this._entryMap[entry.key];
|
||||
this._entryCount -= 1;
|
||||
delete this.entryMap[entry.key];
|
||||
this.entryCount -= 1;
|
||||
}
    }
}

module.exports = LRUCache;
export default LRUCache;
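A behaviour sketch for the migrated LRUCache, derived from the add, get, remove, count, and clear methods shown in this diff; the import path is an assumption.

```ts
// LRU behaviour sketch; the module path is assumed for illustration.
import LRUCache from './lib/algos/cache/LRUCache';

const cache = new LRUCache(2);     // keep at most 2 entries
cache.add('a', { n: 1 });          // false: 'a' was not in the cache yet
cache.add('b', { n: 2 });
cache.add('c', { n: 3 });          // evicts 'a', the least recently used entry
cache.get('a');                    // undefined: evicted
cache.get('b');                    // { n: 2 }, and 'b' becomes most recently used
cache.count();                     // 2
cache.remove('c');                 // true
cache.clear();
```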
@@ -1,6 +1,6 @@
'use strict'; // eslint-disable-line strict

const { FILTER_SKIP, SKIP_NONE } = require('./tools');
import { FILTER_SKIP, SKIP_NONE } from './tools';

// Use a heuristic to amortize the cost of JSON
// serialization/deserialization only on largest metadata where the

@@ -22,7 +22,7 @@ const TRIM_METADATA_MIN_BLOB_SIZE = 10000;
/**
 * Base class of listing extensions.
 */
class Extension {
export class Extension {
    /**
     * This takes a list of parameters and a logger as the inputs.
     * Derivatives should have their own format regarding parameters.
@ -31,7 +31,13 @@ class Extension {
|
|||
* @param {RequestLogger} logger - the logger
|
||||
* @constructor
|
||||
*/
|
||||
constructor(parameters, logger) {
|
||||
|
||||
parameters: any;
|
||||
logger: any;
|
||||
res: any;
|
||||
keys: number;
|
||||
|
||||
constructor(parameters: any, logger: any) {
|
||||
// inputs
|
||||
this.parameters = parameters;
|
||||
this.logger = logger;
|
||||
|
@ -51,7 +57,7 @@ class Extension {
|
|||
* heavy unused fields, or left untouched (depending on size
|
||||
* heuristics)
|
||||
*/
|
||||
trimMetadata(value) {
|
||||
trimMetadata(value: string): string {
|
||||
let ret = undefined;
|
||||
if (value.length >= TRIM_METADATA_MIN_BLOB_SIZE) {
|
||||
try {
|
||||
|
@ -81,7 +87,7 @@ class Extension {
|
|||
*
|
||||
* @return {object} - listing parameters for metadata
|
||||
*/
|
||||
genMDParams() {
|
||||
genMDParams(): object {
|
||||
return {};
|
||||
}
|
||||
|
||||
|
@ -96,7 +102,7 @@ class Extension {
|
|||
* = 0: entry is accepted but not included (skipping)
|
||||
* < 0: entry is not accepted, listing should finish
|
||||
*/
|
||||
filter(entry) {
|
||||
filter(entry: any): number {
|
||||
return entry ? FILTER_SKIP : FILTER_SKIP;
|
||||
}
|
||||
|
||||
|
@ -108,7 +114,7 @@ class Extension {
|
|||
* @return {string} - the insight: a common prefix or a master key,
|
||||
* or SKIP_NONE if there is no insight
|
||||
*/
|
||||
skipping() {
|
||||
skipping(): string {
|
||||
return SKIP_NONE;
|
||||
}
|
||||
|
||||
|
@ -116,9 +122,7 @@ class Extension {
|
|||
* Get the listing results. Format depends on derivatives' specific logic.
|
||||
* @return {Array} - The listed elements
|
||||
*/
|
||||
result() {
|
||||
result(): any {
|
||||
return this.res;
|
||||
    }
}

module.exports.default = Extension;
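Since Extension becomes a named export, a derived extension can be written directly in TypeScript. The sketch below only illustrates the filter contract documented in this diff (> 0 accept and include, = 0 skip, < 0 stop); the CountOnly class itself is hypothetical.

```ts
// Hypothetical listing extension built on the exported base class.
import { Extension } from './Extension';
import { FILTER_ACCEPT, FILTER_END } from './tools';

class CountOnly extends Extension {
    private count = 0;

    filter(entry: any): number {
        // > 0: accept and include; = 0: skip; < 0: stop the listing
        this.count += 1;
        return this.count < 100 ? FILTER_ACCEPT : FILTER_END;
    }

    result(): any {
        return this.count;
    }
}
```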
@@ -1,16 +1,37 @@
'use strict'; // eslint-disable-line strict

const { inc, checkLimit, listingParamsMasterKeysV0ToV1,
    FILTER_END, FILTER_ACCEPT } = require('./tools');
import { inc, checkLimit, listingParamsMasterKeysV0ToV1,
    FILTER_END, FILTER_ACCEPT } from './tools';
import { VersioningConstants as VSConst } from '../../versioning/constants';

const DEFAULT_MAX_KEYS = 1000;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;

function numberDefault(num, defaultNum) {
function numberDefault(num: string, defaultNum: number): number {
    const parsedNum = Number.parseInt(num, 10);
    return Number.isNaN(parsedNum) ? defaultNum : parsedNum;
}


interface MPUParams {
    delimiter: any;
    splitter: any;
    prefix: any; // TODO type
    uploadIdMarker: any; // TODO type
    maxKeys: string;
    queryPrefixLength: string;
    keyMarker?: any; // TODO type
}


interface V0Params {
    gt?: string;
    gte?: string;
    lt?: string;
    lte?: string;
}


/**
 * Class for the MultipartUploads extension
 */
@ -23,7 +44,22 @@ class MultipartUploads {
|
|||
* @param {String} [vFormat] - versioning key format
|
||||
* @return {undefined}
|
||||
*/
|
||||
constructor(params, logger, vFormat) {
|
||||
params: MPUParams; // TODO param type
|
||||
vFormat: string; // TODO vFormat type
|
||||
CommonPrefixes: any[]; // TODO commonPrefixes type
|
||||
Uploads: any[]; // TODO type
|
||||
IsTruncated: boolean;
|
||||
NextKeyMarker: string;
|
||||
NextUploadIdMarker: string;
|
||||
prefixLength: number;
|
||||
queryPrefixLength: number;
|
||||
keys: number;
|
||||
maxKeys: number;
|
||||
delimiter: any; // TODO type
|
||||
splitter: any; // TODO type
|
||||
logger: any; // TODO type
|
||||
|
||||
constructor(params: MPUParams, logger: any, vFormat: string) {
|
||||
this.params = params;
|
||||
this.vFormat = vFormat || BucketVersioningKeyFormat.v0;
|
||||
this.CommonPrefixes = [];
|
||||
|
@ -51,8 +87,8 @@ class MultipartUploads {
|
|||
}[this.vFormat]);
|
||||
}
|
||||
|
||||
genMDParamsV0() {
|
||||
const params = {};
|
||||
genMDParamsV0(): V0Params {
|
||||
const params: V0Params = {};
|
||||
if (this.params.keyMarker) {
|
||||
params.gt = `overview${this.params.splitter}` +
|
||||
`${this.params.keyMarker}${this.params.splitter}`;
|
||||
|
@ -74,6 +110,7 @@ class MultipartUploads {
|
|||
}
|
||||
|
||||
genMDParamsV1() {
|
||||
// TODO v1 params definition
|
||||
const v0params = this.genMDParamsV0();
|
||||
return listingParamsMasterKeysV0ToV1(v0params);
|
||||
}
|
||||
|
@ -85,7 +122,7 @@ class MultipartUploads {
|
|||
* @param {String} value - The value of the key
|
||||
* @return {undefined}
|
||||
*/
|
||||
addUpload(value) {
|
||||
addUpload(value: string): undefined {
|
||||
const tmp = JSON.parse(value);
|
||||
this.Uploads.push({
|
||||
key: tmp.key,
|
||||
|
@ -114,7 +151,7 @@ class MultipartUploads {
|
|||
* @param {String} commonPrefix - The commonPrefix to add
|
||||
* @return {undefined}
|
||||
*/
|
||||
addCommonPrefix(commonPrefix) {
|
||||
addCommonPrefix(commonPrefix: string): undefined {
|
||||
if (this.CommonPrefixes.indexOf(commonPrefix) === -1) {
|
||||
this.CommonPrefixes.push(commonPrefix);
|
||||
this.NextKeyMarker = commonPrefix;
|
||||
|
@ -122,11 +159,11 @@ class MultipartUploads {
|
|||
}
|
||||
}
|
||||
|
||||
getObjectKeyV0(obj) {
|
||||
getObjectKeyV0(obj: any) { // TODO this is an Upload value
|
||||
return obj.key;
|
||||
}
|
||||
|
||||
getObjectKeyV1(obj) {
|
||||
getObjectKeyV1(obj: any) { // TODO this is an Upload value
|
||||
return obj.key.slice(DbPrefixes.Master.length);
|
||||
}
|
||||
|
||||
|
@ -135,14 +172,14 @@ class MultipartUploads {
|
|||
* @param {String} obj - The key and value of the element
|
||||
* @return {number} - > 0: Continue, < 0: Stop
|
||||
*/
|
||||
filter(obj) {
|
||||
filter(obj: any): number {
|
||||
// Check first in case of maxkeys = 0
|
||||
if (this.keys >= this.maxKeys) {
|
||||
// In cases of maxKeys <= 0 => IsTruncated = false
|
||||
this.IsTruncated = this.maxKeys > 0;
|
||||
return FILTER_END;
|
||||
}
|
||||
const key = this.getObjectKey(obj);
|
||||
const key = this.getObjectKey(obj); // TODO this is actually valid - see ctor
|
||||
const value = obj.value;
|
||||
if (this.delimiter) {
|
||||
const mpuPrefixSlice = `overview${this.splitter}`.length;
|
||||
|
@ -162,7 +199,7 @@ class MultipartUploads {
|
|||
return FILTER_ACCEPT;
|
||||
}
|
||||
|
||||
skipping() {
|
||||
skipping(): string {
|
||||
return '';
|
||||
}
|
||||
|
||||
|
@ -170,7 +207,7 @@ class MultipartUploads {
|
|||
* Returns the formatted result
|
||||
* @return {Object} - The result.
|
||||
*/
|
||||
result() {
|
||||
result(): object {
|
||||
return {
|
||||
CommonPrefixes: this.CommonPrefixes,
|
||||
Uploads: this.Uploads,
|
||||
|
@@ -183,6 +220,7 @@
    }
}

module.exports = {
export {
    MultipartUploads,
};
    MPUParams
}
@@ -1,14 +1,27 @@
'use strict'; // eslint-disable-line strict

const Extension = require('./Extension').default;
import { Extension } from './Extension';

const { checkLimit, FILTER_END, FILTER_ACCEPT, FILTER_SKIP } = require('./tools');
import { checkLimit, FILTER_END, FILTER_ACCEPT, FILTER_SKIP } from './tools';
const DEFAULT_MAX_KEYS = 10000;


interface ListParams {
    maxKeys: number;
    filterKey: any; // TODO type
    filterKeyStartsWith: any; // TODO type
}


/**
 * Class of an extension doing the simple listing
 */
class List extends Extension {

    maxKeys: number;
    filterKey: any;
    filterKeyStartsWith: any;

    /**
     * Constructor
     * Set the logger and the res
@ -16,7 +29,7 @@ class List extends Extension {
|
|||
* @param {RequestLogger} logger - The logger of the request
|
||||
* @return {undefined}
|
||||
*/
|
||||
constructor(parameters, logger) {
|
||||
constructor(parameters: ListParams, logger: any) {
|
||||
super(parameters, logger);
|
||||
this.res = [];
|
||||
if (parameters) {
|
||||
|
@ -29,7 +42,7 @@ class List extends Extension {
|
|||
this.keys = 0;
|
||||
}
|
||||
|
||||
genMDParams() {
|
||||
genMDParams(): object {
|
||||
const params = this.parameters ? {
|
||||
gt: this.parameters.gt,
|
||||
gte: this.parameters.gte || this.parameters.start,
|
||||
|
@ -53,7 +66,7 @@ class List extends Extension {
|
|||
*
|
||||
* @return {Boolean} Returns true if matches, else false.
|
||||
*/
|
||||
customFilter(value) {
|
||||
customFilter(value: string): boolean {
|
||||
let _value;
|
||||
try {
|
||||
_value = JSON.parse(value);
|
||||
|
@ -90,7 +103,7 @@ class List extends Extension {
|
|||
* @return {number} - > 0 : continue listing
|
||||
* < 0 : listing done
|
||||
*/
|
||||
filter(elem) {
|
||||
filter(elem: object): number {
|
||||
// Check first in case of maxkeys <= 0
|
||||
if (this.keys >= this.maxKeys) {
|
||||
return FILTER_END;
|
||||
|
@@ -117,7 +130,7 @@
     * Function returning the result
     * @return {Array} - The listed elements
     */
    result() {
    result(): any[] {
        return this.res;
    }
}
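A usage sketch of the simple List extension above; the metadata entries and the logger are assumptions, and the parameter object is intentionally minimal.

```ts
// Feeding raw entries through the List extension (entries and logger assumed).
import { List } from './basic';

const listing = new List({ maxKeys: 2 } as any, console);
const entries = [
    { key: 'a', value: '{}' },
    { key: 'b', value: '{}' },
    { key: 'c', value: '{}' },
];
for (const entry of entries) {
    if (listing.filter(entry) < 0) {
        break;                     // FILTER_END once maxKeys entries are listed
    }
}
listing.result();                  // the listed elements (at most maxKeys)
```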
@@ -1,9 +1,9 @@
'use strict'; // eslint-disable-line strict

const Extension = require('./Extension').default;
const { inc, listingParamsMasterKeysV0ToV1,
    FILTER_END, FILTER_ACCEPT, FILTER_SKIP } = require('./tools');
const VSConst = require('../../versioning/constants').VersioningConstants;
import { Extension } from './Extension';
import { inc, listingParamsMasterKeysV0ToV1,
    FILTER_END, FILTER_ACCEPT, FILTER_SKIP } from './tools';
import { VersioningConstants as VSConst } from '../../versioning/constants';
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;

/**
@ -14,10 +14,41 @@ const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;
|
|||
* @param {Number} delimiterIndex - 'folder' index in the path
|
||||
* @return {String} - CommonPrefix
|
||||
*/
|
||||
function getCommonPrefix(key, delimiter, delimiterIndex) {
|
||||
function getCommonPrefix(
|
||||
key: string, delimiter: string, delimiterIndex: number
|
||||
): string {
|
||||
return key.substring(0, delimiterIndex + delimiter.length);
|
||||
}
|
||||
|
||||
|
||||
interface DelimiterParams {
|
||||
delimiter: string;
|
||||
prefix: string;
|
||||
marker: string;
|
||||
maxKeys: number;
|
||||
v2: boolean;
|
||||
startAfter: string;
|
||||
continuationToken: string;
|
||||
alphabeticalOrder: boolean;
|
||||
}
|
||||
|
||||
|
||||
interface DelimiterContentItem {
|
||||
key: string;
|
||||
value: string;
|
||||
}
|
||||
|
||||
|
||||
interface DelimiterResult {
|
||||
CommonPrefixes: string[];
|
||||
Contents: DelimiterContentItem[]; // TODO type this.Contents,
|
||||
IsTruncated: boolean;
|
||||
Delimiter: string;
|
||||
NextMarker?: any; // TODO type
|
||||
NextContinuationToken?: any; // TODO type
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Handle object listing with parameters
|
||||
*
|
||||
|
@ -55,7 +86,25 @@ class Delimiter extends Extension {
|
|||
* request
|
||||
* @param {String} [vFormat] - versioning key format
|
||||
*/
|
||||
constructor(parameters, logger, vFormat) {
|
||||
|
||||
delimiter: string;
|
||||
prefix: string;
|
||||
marker: string;
|
||||
maxKeys: number;
|
||||
startAfter: string;
|
||||
continuationToken: string;
|
||||
alphabeticalOrder: boolean;
|
||||
vFormat: string;
|
||||
CommonPrefixes: string[];
|
||||
Contents: DelimiterContentItem[];
|
||||
IsTruncated: boolean;
|
||||
NextMarker: string;
|
||||
NextContinuationToken: string;
|
||||
startMarker: string;
|
||||
continueMarker: string;
|
||||
nextContinueMarker: string;
|
||||
|
||||
constructor(parameters: DelimiterParams, logger: any, vFormat: string) {
|
||||
super(parameters, logger);
|
||||
// original listing parameters
|
||||
this.delimiter = parameters.delimiter;
|
||||
|
@ -134,7 +183,7 @@ class Delimiter extends Extension {
|
|||
* final state of the result if it is the case
|
||||
* @return {Boolean} - indicates if the iteration has to stop
|
||||
*/
|
||||
_reachedMaxKeys() {
|
||||
_reachedMaxKeys(): boolean {
|
||||
if (this.keys >= this.maxKeys) {
|
||||
// In cases of maxKeys <= 0 -> IsTruncated = false
|
||||
this.IsTruncated = this.maxKeys > 0;
|
||||
|
@ -151,7 +200,7 @@ class Delimiter extends Extension {
|
|||
* @param {String} value - The value of the key
|
||||
* @return {number} - indicates if iteration should continue
|
||||
*/
|
||||
addContents(key, value) {
|
||||
addContents(key: string, value: string): number {
|
||||
if (this._reachedMaxKeys()) {
|
||||
return FILTER_END;
|
||||
}
|
||||
|
@ -180,7 +229,7 @@ class Delimiter extends Extension {
|
|||
* @param {String} obj.value - The value of the element
|
||||
* @return {number} - indicates if iteration should continue
|
||||
*/
|
||||
filter(obj) {
|
||||
filter(obj): number {
|
||||
const key = this.getObjectKey(obj);
|
||||
const value = obj.value;
|
||||
if ((this.prefix && !key.startsWith(this.prefix))
|
||||
|
@ -206,7 +255,7 @@ class Delimiter extends Extension {
|
|||
* @param {Number} index - after prefix starting point
|
||||
* @return {Boolean} - indicates if iteration should continue
|
||||
*/
|
||||
addCommonPrefix(key, index) {
|
||||
addCommonPrefix(key: string, index: number) {
|
||||
const commonPrefix = getCommonPrefix(key, this.delimiter, index);
|
||||
if (this.CommonPrefixes.indexOf(commonPrefix) === -1
|
||||
&& this[this.nextContinueMarker] !== commonPrefix) {
|
||||
|
@ -228,7 +277,7 @@ class Delimiter extends Extension {
|
|||
* @return {string} - the present range (NextMarker) if repd believes
|
||||
* that it's enough and should move on
|
||||
*/
|
||||
skippingV0() {
|
||||
skippingV0(): string {
|
||||
return this[this.nextContinueMarker];
|
||||
}
|
||||
|
||||
|
@ -239,7 +288,7 @@ class Delimiter extends Extension {
|
|||
* @return {string} - the present range (NextMarker) if repd believes
|
||||
* that it's enough and should move on
|
||||
*/
|
||||
skippingV1() {
|
||||
skippingV1(): string {
|
||||
return DbPrefixes.Master + this[this.nextContinueMarker];
|
||||
}
|
||||
|
||||
|
@ -249,12 +298,12 @@ class Delimiter extends Extension {
|
|||
* isn't truncated
|
||||
* @return {Object} - following amazon format
|
||||
*/
|
||||
result() {
|
||||
result(): DelimiterResult {
|
||||
/* NextMarker is only provided when delimiter is used.
|
||||
* specified in v1 listing documentation
|
||||
* http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
|
||||
*/
|
||||
const result = {
|
||||
const result: DelimiterResult = {
|
||||
CommonPrefixes: this.CommonPrefixes,
|
||||
Contents: this.Contents,
|
||||
IsTruncated: this.IsTruncated,
|
||||
|
@@ -271,4 +320,4 @@
    }
}

module.exports = { Delimiter };
export { Delimiter, DelimiterParams };
@@ -1,10 +1,11 @@
'use strict'; // eslint-disable-line strict

const Delimiter = require('./delimiter').Delimiter;
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
import { Delimiter } from './delimiter';
import type { DelimiterParams } from './delimiter';
import { Version } from '../../versioning/Version';
import { VersioningConstants as VSConst } from '../../versioning/constants';
const { BucketVersioningKeyFormat } = VSConst;
const { FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } = require('./tools');
import { FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } from './tools';

const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes } = VSConst;
@ -27,7 +28,11 @@ class DelimiterMaster extends Delimiter {
|
|||
* @param {RequestLogger} logger - The logger of the request
|
||||
* @param {String} [vFormat] - versioning key format
|
||||
*/
|
||||
constructor(parameters, logger, vFormat) {
|
||||
prvKey?: any; // TODO type
|
||||
prvPHDKey?: any; // TODO type
|
||||
inReplayPrefix?: any; // TODO type
|
||||
|
||||
constructor(parameters: DelimiterParams, logger: any, vFormat: string) {
|
||||
super(parameters, logger, vFormat);
|
||||
// non-PHD master version or a version whose master is a PHD version
|
||||
this.prvKey = undefined;
|
||||
|
@ -58,7 +63,7 @@ class DelimiterMaster extends Delimiter {
|
|||
* @param {String} obj.value - The value of the element
|
||||
* @return {number} - indicates if iteration should continue
|
||||
*/
|
||||
filterV0(obj) {
|
||||
filterV0(obj: object): number {
|
||||
let key = obj.key;
|
||||
const value = obj.value;
|
||||
|
||||
|
@ -155,14 +160,14 @@ class DelimiterMaster extends Delimiter {
|
|||
* @param {String} obj.value - The value of the element
|
||||
* @return {number} - indicates if iteration should continue
|
||||
*/
|
||||
filterV1(obj) {
|
||||
filterV1(obj: object): number {
|
||||
// Filtering master keys in v1 is simply listing the master
|
||||
// keys, as the state of version keys do not change the
|
||||
// result, so we can use Delimiter method directly.
|
||||
return super.filter(obj);
|
||||
}
|
||||
|
||||
skippingBase() {
|
||||
skippingBase(): string {
|
||||
if (this[this.nextContinueMarker]) {
|
||||
// next marker or next continuation token:
|
||||
// - foo/ : skipping foo/
|
||||
|
@ -177,14 +182,14 @@ class DelimiterMaster extends Delimiter {
|
|||
return SKIP_NONE;
|
||||
}
|
||||
|
||||
skippingV0() {
|
||||
skippingV0(): string {
|
||||
if (this.inReplayPrefix) {
|
||||
return DbPrefixes.Replay;
|
||||
}
|
||||
return this.skippingBase();
|
||||
}
|
||||
|
||||
skippingV1() {
|
||||
skippingV1(): string {
|
||||
const skipTo = this.skippingBase();
|
||||
if (skipTo === SKIP_NONE) {
|
||||
return SKIP_NONE;
|
||||
|
@@ -193,4 +198,4 @@
    }
}

module.exports = { DelimiterMaster };
export { DelimiterMaster };
@@ -1,14 +1,32 @@
'use strict'; // eslint-disable-line strict

const Delimiter = require('./delimiter').Delimiter;
const Version = require('../../versioning/Version').Version;
const VSConst = require('../../versioning/constants').VersioningConstants;
const { inc, FILTER_END, FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } =
    require('./tools');
import { Delimiter } from './delimiter';
import type { DelimiterParams } from './delimiter';
import type { MPUParams } from './MPU';
import { Version } from '../../versioning/Version';
import { VersioningConstants as VSConst } from '../../versioning/constants';
import { inc, FILTER_END, FILTER_ACCEPT, FILTER_SKIP, SKIP_NONE } from './tools';

const VID_SEP = VSConst.VersionId.Separator;
const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;


interface DelimiterVersionsParams extends DelimiterParams {
    keyMarker: string; // TODO type
    versionIdMarker: any; // TODO type
}


interface DelimiterVersionsResult {
    CommonPrefixes: string[];
    Versions: any; // TODO type
    IsTruncated: boolean,
    NextKeyMarker?: any; // TODO type
    NextVersionIdMarker?: any; // TODO type
    Delimiter: string;
}


/**
 * Handle object listing with parameters
 *
@ -22,7 +40,15 @@ const { DbPrefixes, BucketVersioningKeyFormat } = VSConst;
|
|||
* @prop {Number} maxKeys - number of keys to list
|
||||
*/
|
||||
class DelimiterVersions extends Delimiter {
|
||||
constructor(parameters, logger, vFormat) {
|
||||
|
||||
keyMarker: string;
|
||||
versionIdMarker: any;
|
||||
masterKey?: any; // TODO type
|
||||
masterVersionId?: any; // TODO type
|
||||
NextVersionIdMarker: any; // TODO type
|
||||
inReplayPrefix: boolean;
|
||||
|
||||
constructor(parameters: DelimiterVersionsParams, logger: any, vFormat: string) {
|
||||
super(parameters, logger, vFormat);
|
||||
// specific to version listing
|
||||
this.keyMarker = parameters.keyMarker;
|
||||
|
@ -49,7 +75,7 @@ class DelimiterVersions extends Delimiter {
|
|||
}[this.vFormat]);
|
||||
}
|
||||
|
||||
genMDParamsV0() {
|
||||
genMDParamsV0(): MPUParams {
|
||||
const params = {};
|
||||
if (this.parameters.prefix) {
|
||||
params.gte = this.parameters.prefix;
|
||||
|
@ -73,40 +99,41 @@ class DelimiterVersions extends Delimiter {
|
|||
return params;
|
||||
}
|
||||
|
||||
genMDParamsV1() {
|
||||
genMDParamsV1(): MPUParams[] {
|
||||
// return an array of two listing params sets to ask for
|
||||
// synchronized listing of M and V ranges
|
||||
const params = [{}, {}];
|
||||
const mRangeParams: MPUParams = {};
|
||||
const vRangeParams: MPUParams = {};
|
||||
if (this.parameters.prefix) {
|
||||
params[0].gte = DbPrefixes.Master + this.parameters.prefix;
|
||||
params[0].lt = DbPrefixes.Master + inc(this.parameters.prefix);
|
||||
params[1].gte = DbPrefixes.Version + this.parameters.prefix;
|
||||
params[1].lt = DbPrefixes.Version + inc(this.parameters.prefix);
|
||||
mRangeParams.gte = DbPrefixes.Master + this.parameters.prefix;
|
||||
mRangeParams.lt = DbPrefixes.Master + inc(this.parameters.prefix);
|
||||
vRangeParams.gte = DbPrefixes.Version + this.parameters.prefix;
|
||||
vRangeParams.lt = DbPrefixes.Version + inc(this.parameters.prefix);
|
||||
} else {
|
||||
params[0].gte = DbPrefixes.Master;
|
||||
params[0].lt = inc(DbPrefixes.Master); // stop after the last master key
|
||||
params[1].gte = DbPrefixes.Version;
|
||||
params[1].lt = inc(DbPrefixes.Version); // stop after the last version key
|
||||
mRangeParams.gte = DbPrefixes.Master;
|
||||
mRangeParams.lt = inc(DbPrefixes.Master); // stop after the last master key
|
||||
vRangeParams.gte = DbPrefixes.Version;
|
||||
vRangeParams.lt = inc(DbPrefixes.Version); // stop after the last version key
|
||||
}
|
||||
if (this.parameters.keyMarker) {
|
||||
if (params[1].gte <= DbPrefixes.Version + this.parameters.keyMarker) {
|
||||
delete params[0].gte;
|
||||
delete params[1].gte;
|
||||
params[0].gt = DbPrefixes.Master + inc(this.parameters.keyMarker + VID_SEP);
|
||||
if (vRangeParams.gte <= DbPrefixes.Version + this.parameters.keyMarker) {
|
||||
delete mRangeParams.gte;
|
||||
delete vRangeParams.gte;
|
||||
mRangeParams.gt = DbPrefixes.Master + inc(this.parameters.keyMarker + VID_SEP);
|
||||
if (this.parameters.versionIdMarker) {
|
||||
// versionIdMarker should always come with keyMarker
|
||||
// but may not be the other way around
|
||||
params[1].gt = DbPrefixes.Version
|
||||
vRangeParams.gt = DbPrefixes.Version
|
||||
+ this.parameters.keyMarker
|
||||
+ VID_SEP
|
||||
+ this.parameters.versionIdMarker;
|
||||
} else {
|
||||
params[1].gt = DbPrefixes.Version
|
||||
vRangeParams.gt = DbPrefixes.Version
|
||||
+ inc(this.parameters.keyMarker + VID_SEP);
|
||||
}
|
||||
}
|
||||
}
|
||||
return params;
|
||||
return [mRangeParams, vRangeParams];
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -120,7 +147,7 @@ class DelimiterVersions extends Delimiter {
|
|||
* * -1 if master key < version key
|
||||
* * 1 if master key > version key
|
||||
*/
|
||||
compareObjects(masterObj, versionObj) {
|
||||
compareObjects(masterObj: object, versionObj: object): number {
|
||||
const masterKey = masterObj.key.slice(DbPrefixes.Master.length);
|
||||
const versionKey = versionObj.key.slice(DbPrefixes.Version.length);
|
||||
return masterKey < versionKey ? -1 : 1;
|
||||
|
@ -136,7 +163,7 @@ class DelimiterVersions extends Delimiter {
|
|||
* @param {String} obj.value - The value of the key
|
||||
* @return {Boolean} - indicates if iteration should continue
|
||||
*/
|
||||
addContents(obj) {
|
||||
addContents(obj: object): boolean {
|
||||
if (this._reachedMaxKeys()) {
|
||||
return FILTER_END;
|
||||
}
|
||||
|
@ -163,7 +190,7 @@ class DelimiterVersions extends Delimiter {
|
|||
* @param {String} obj.value - The value of the element
|
||||
* @return {number} - indicates if iteration should continue
|
||||
*/
|
||||
filterV0(obj) {
|
||||
filterV0(obj: object): number {
|
||||
if (obj.key.startsWith(DbPrefixes.Replay)) {
|
||||
this.inReplayPrefix = true;
|
||||
return FILTER_SKIP;
|
||||
|
@ -189,7 +216,7 @@ class DelimiterVersions extends Delimiter {
|
|||
* @param {String} obj.value - The value of the element
|
||||
* @return {number} - indicates if iteration should continue
|
||||
*/
|
||||
filterV1(obj) {
|
||||
filterV1(obj: object): number {
|
||||
// this function receives both M and V keys, but their prefix
|
||||
// length is the same so we can remove their prefix without
|
||||
// looking at the type of key
|
||||
|
@ -197,7 +224,7 @@ class DelimiterVersions extends Delimiter {
|
|||
obj.value);
|
||||
}
|
||||
|
||||
filterCommon(key, value) {
|
||||
filterCommon(key: string, value: string): boolean {
|
||||
if (this.prefix && !key.startsWith(this.prefix)) {
|
||||
return FILTER_SKIP;
|
||||
}
|
||||
|
@ -230,7 +257,7 @@ class DelimiterVersions extends Delimiter {
|
|||
return this.addContents({ key: nonversionedKey, value, versionId });
|
||||
}
|
||||
|
||||
skippingV0() {
|
||||
skippingV0(): string {
|
||||
if (this.inReplayPrefix) {
|
||||
return DbPrefixes.Replay;
|
||||
}
|
||||
|
@ -243,7 +270,7 @@ class DelimiterVersions extends Delimiter {
|
|||
return SKIP_NONE;
|
||||
}
|
||||
|
||||
skippingV1() {
|
||||
skippingV1(): string {
|
||||
const skipV0 = this.skippingV0();
|
||||
if (skipV0 === SKIP_NONE) {
|
||||
return SKIP_NONE;
|
||||
|
@ -259,7 +286,7 @@ class DelimiterVersions extends Delimiter {
|
|||
* isn't truncated
|
||||
* @return {Object} - following amazon format
|
||||
*/
|
||||
result() {
|
||||
result(): DelimiterVersionsResult {
|
||||
/* NextMarker is only provided when delimiter is used.
|
||||
* specified in v1 listing documentation
|
||||
* http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGET.html
|
||||
|
@@ -276,4 +303,4 @@
    }
}

module.exports = { DelimiterVersions };
export { DelimiterVersions };
@@ -1,9 +0,0 @@
module.exports = {
    Basic: require('./basic').List,
    Delimiter: require('./delimiter').Delimiter,
    DelimiterVersions: require('./delimiterVersions')
        .DelimiterVersions,
    DelimiterMaster: require('./delimiterMaster')
        .DelimiterMaster,
    MPU: require('./MPU').MultipartUploads,
};
@@ -0,0 +1,13 @@
import { List as Basic } from './basic';
import { Delimiter } from './delimiter';
import { DelimiterVersions } from './delimiterVersions';
import { DelimiterMaster } from './delimiterMaster';
import { MultipartUploads as MPU } from './MPU';

export {
    Basic,
    Delimiter,
    DelimiterVersions,
    DelimiterMaster,
    MPU,
};
@@ -1,10 +1,15 @@
const assert = require('assert');
import { strict as assert } from 'assert';

const { FILTER_END, FILTER_SKIP, SKIP_NONE } = require('./tools');
import { FILTER_END, FILTER_SKIP, SKIP_NONE } from './tools';


const MAX_STREAK_LENGTH = 100;

interface SkipParams {
    extension: any;
    gte: any;
}

/**
 * Handle the filtering and the skip mechanism of a listing result.
 */
@ -15,14 +20,23 @@ class Skip {
|
|||
* @param {String} params.gte - current range gte (greater than or
|
||||
* equal) used by the client code
|
||||
*/
|
||||
constructor(params) {
|
||||
|
||||
extension: any;
|
||||
gteParams: any;
|
||||
listingEndCb?: Function;
|
||||
skipRangeCb?: Function;
|
||||
streakLength: number;
|
||||
|
||||
constructor(params: SkipParams) {
|
||||
// TODO - once we're in strict TS everywhere, we no longer need these
|
||||
// assertions
|
||||
assert(params.extension);
|
||||
|
||||
this.extension = params.extension;
|
||||
this.gteParams = params.gte;
|
||||
|
||||
this.listingEndCb = null;
|
||||
this.skipRangeCb = null;
|
||||
this.listingEndCb = undefined;
|
||||
this.skipRangeCb = undefined;
|
||||
|
||||
/* Used to count consecutive FILTER_SKIP returned by the extension
|
||||
* filter method. Once this counter reaches MAX_STREAK_LENGTH, the
|
||||
|
@ -31,11 +45,11 @@ class Skip {
|
|||
this.streakLength = 0;
|
||||
}
|
||||
|
||||
setListingEndCb(cb) {
|
||||
setListingEndCb(cb: Function) {
|
||||
this.listingEndCb = cb;
|
||||
}
|
||||
|
||||
setSkipRangeCb(cb) {
|
||||
setSkipRangeCb(cb: Function) {
|
||||
this.skipRangeCb = cb;
|
||||
}
|
||||
|
||||
|
@ -47,9 +61,9 @@ class Skip {
|
|||
* This function calls the listing end or the skip range callbacks if
|
||||
* needed.
|
||||
*/
|
||||
filter(entry) {
|
||||
assert(this.listingEndCb);
|
||||
assert(this.skipRangeCb);
|
||||
filter(entry: object): undefined {
|
||||
assert(this.listingEndCb !== undefined);
|
||||
assert(this.skipRangeCb !== undefined);
|
||||
|
||||
const filteringResult = this.extension.filter(entry);
|
||||
const skippingRange = this.extension.skipping();
|
||||
|
@ -73,7 +87,7 @@ class Skip {
|
|||
}
|
||||
}
|
||||
|
||||
_inc(str) {
|
||||
_inc(str: string): string {
|
||||
if (!str) {
|
||||
return str;
|
||||
}
|
||||
|
@@ -84,5 +98,7 @@
    }
}


module.exports = Skip;
export {
    Skip,
    SkipParams
}
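A wiring sketch for the migrated Skip helper, using only the constructor parameters and callbacks visible in this diff; the stub extension and the import path are assumptions standing in for a real listing extension.

```ts
// Wiring sketch for Skip; the stub extension is an assumption for illustration.
import { Skip } from './skip';
import { FILTER_ACCEPT, SKIP_NONE } from './tools';

const extension = {
    filter: (entry: any) => FILTER_ACCEPT, // accept everything
    skipping: () => SKIP_NONE,             // never suggest a range to skip
};

const skip = new Skip({ extension, gte: undefined });
skip.setListingEndCb(() => { /* stop streaming entries to the extension */ });
skip.setSkipRangeCb((newRange: string) => { /* restart the listing at newRange */ });

// filter() asserts that both callbacks are set before doing any work.
skip.filter({ key: 'foo/bar', value: '{}' });
```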
@@ -1,4 +1,6 @@
const { DbPrefixes } = require('../../versioning/constants').VersioningConstants;
import { VersioningConstants } from '../../versioning/constants';

const DbPrefixes = VersioningConstants.DbPrefixes;

// constants for extensions
const SKIP_NONE = undefined; // to be inline with the values of NextMarker

@@ -15,8 +17,8 @@ const FILTER_END = -1;
 * @param {Number} limit - The limit to respect
 * @return {Number} - The parsed number || limit
 */
function checkLimit(number, limit) {
    const parsed = Number.parseInt(number, 10);
function checkLimit(str: string, limit: number): number {
    const parsed = Number.parseInt(str, 10);
    const valid = !Number.isNaN(parsed) && (!limit || parsed <= limit);
    return valid ? parsed : limit;
}

@@ -28,7 +30,7 @@ function checkLimit(number, limit) {
 * @return {string} - the incremented string
 * or the input if it is not valid
 */
function inc(str) {
function inc(str: string): string {
    return str ? (str.slice(0, str.length - 1) +
        String.fromCharCode(str.charCodeAt(str.length - 1) + 1)) : str;
}

@@ -40,7 +42,7 @@ function inc(str) {
 * @param {object} v0params - listing parameters for v0 format
 * @return {object} - listing parameters for v1 format
 */
function listingParamsMasterKeysV0ToV1(v0params) {
function listingParamsMasterKeysV0ToV1(v0params: any): any {
    const v1params = Object.assign({}, v0params);
    if (v0params.gt !== undefined) {
        v1params.gt = `${DbPrefixes.Master}${v0params.gt}`;

@@ -59,7 +61,7 @@ function listingParamsMasterKeysV0ToV1(v0params) {
    return v1params;
}

module.exports = {
export {
    checkLimit,
    inc,
    listingParamsMasterKeysV0ToV1,
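The expected behaviour of the two typed helpers above, derived directly from their bodies:

```ts
import { checkLimit, inc } from './tools';

checkLimit('500', 1000);   // 500  (parsed and within the limit)
checkLimit('5000', 1000);  // 1000 (over the limit, falls back to it)
checkLimit('oops', 1000);  // 1000 (NaN falls back to the limit)

inc('foo');                // 'fop' (last character code incremented by one)
inc('');                   // ''   (falsy input is returned unchanged)
```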
@ -91,7 +91,7 @@ class Vault {
|
|||
requestContext: serializedRCsArr,
|
||||
},
|
||||
(err, userInfo) => vaultSignatureCb(err, userInfo,
|
||||
params.log, callback)
|
||||
params.log, callback),
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -146,7 +146,7 @@ class Vault {
|
|||
requestContext: serializedRCs,
|
||||
},
|
||||
(err, userInfo) => vaultSignatureCb(err, userInfo,
|
||||
params.log, callback, streamingV4Params)
|
||||
params.log, callback, streamingV4Params),
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -232,28 +232,28 @@ class Vault {
|
|||
*/
|
||||
getAccountIds(canonicalIDs, log, callback) {
|
||||
log.trace('getting accountIds from Vault based on canonicalIDs',
|
||||
{ canonicalIDs });
|
||||
{ canonicalIDs });
|
||||
this.client.getAccountIds(canonicalIDs,
|
||||
{ reqUid: log.getSerializedUids() },
|
||||
(err, info) => {
|
||||
if (err) {
|
||||
log.debug('received error message from vault',
|
||||
{ errorMessage: err });
|
||||
return callback(err);
|
||||
}
|
||||
const infoFromVault = info.message.body;
|
||||
log.trace('info received from vault', { infoFromVault });
|
||||
const result = {};
|
||||
/* If the accountId was not found in Vault, do not
|
||||
send the canonicalID back to the API */
|
||||
Object.keys(infoFromVault).forEach(key => {
|
||||
if (infoFromVault[key] !== 'NotFound' &&
|
||||
infoFromVault[key] !== 'WrongFormat') {
|
||||
result[key] = infoFromVault[key];
|
||||
{ reqUid: log.getSerializedUids() },
|
||||
(err, info) => {
|
||||
if (err) {
|
||||
log.debug('received error message from vault',
|
||||
{ errorMessage: err });
|
||||
return callback(err);
|
||||
}
|
||||
const infoFromVault = info.message.body;
|
||||
log.trace('info received from vault', { infoFromVault });
|
||||
const result = {};
|
||||
/* If the accountId was not found in Vault, do not
|
||||
send the canonicalID back to the API */
|
||||
Object.keys(infoFromVault).forEach(key => {
|
||||
if (infoFromVault[key] !== 'NotFound' &&
|
||||
infoFromVault[key] !== 'WrongFormat') {
|
||||
result[key] = infoFromVault[key];
|
||||
}
|
||||
});
|
||||
return callback(null, result);
|
||||
});
|
||||
return callback(null, result);
|
||||
});
|
||||
}
|
||||
|
||||
/** checkPolicies -- call Vault to evaluate policies
|
||||
|
|
|
@ -74,7 +74,7 @@ function extractParams(request, log, awsService, data) {
|
|||
version = 'v4';
|
||||
} else {
|
||||
log.trace('invalid authorization security header',
|
||||
{ header: authHeader });
|
||||
{ header: authHeader });
|
||||
return { err: errors.AccessDenied };
|
||||
}
|
||||
} else if (data.Signature) {
|
||||
|
@ -89,7 +89,7 @@ function extractParams(request, log, awsService, data) {
|
|||
if (version !== null && method !== null) {
|
||||
if (!checkFunctions[version] || !checkFunctions[version][method]) {
|
||||
log.trace('invalid auth version or method',
|
||||
{ version, authMethod: method });
|
||||
{ version, authMethod: method });
|
||||
return { err: errors.NotImplemented };
|
||||
}
|
||||
log.trace('identified auth method', { version, authMethod: method });
|
||||
|
@ -159,7 +159,7 @@ function doAuth(request, log, cb, awsService, requestContexts) {
|
|||
* @return {undefined}
|
||||
*/
|
||||
function generateV4Headers(request, data, accessKey, secretKeyValue,
|
||||
awsService, proxyPath) {
|
||||
awsService, proxyPath) {
|
||||
Object.assign(request, { headers: {} });
|
||||
const amzDate = convertUTCtoISO8601(Date.now());
|
||||
// get date without time
|
||||
|
@ -187,16 +187,16 @@ function generateV4Headers(request, data, accessKey, secretKeyValue,
|
|||
.filter(headerName =>
|
||||
headerName.startsWith('x-amz-')
|
||||
|| headerName.startsWith('x-scal-')
|
||||
|| headerName === 'host'
|
||||
|| headerName === 'host',
|
||||
).sort().join(';');
|
||||
const params = { request, signedHeaders, payloadChecksum,
|
||||
credentialScope, timestamp, query: data,
|
||||
awsService: service, proxyPath };
|
||||
const stringToSign = constructStringToSignV4(params);
|
||||
const signingKey = vaultUtilities.calculateSigningKey(secretKeyValue,
|
||||
region,
|
||||
scopeDate,
|
||||
service);
|
||||
region,
|
||||
scopeDate,
|
||||
service);
|
||||
const signature = crypto.createHmac('sha256', signingKey)
|
||||
.update(stringToSign, 'binary').digest('hex');
|
||||
const authorizationHeader = `${algorithm} Credential=${accessKey}` +
|
||||
|
|
|
@@ -29,7 +29,7 @@ class ChainBackend extends BaseBackend {
            typeof client.getCanonicalIds === 'function' &&
            typeof client.getEmailAddresses === 'function' &&
            typeof client.checkPolicies === 'function' &&
            typeof client.healthcheck === 'function'
            typeof client.healthcheck === 'function',
        ), 'invalid client: missing required auth backend methods');
        this._clients = clients;
    }

@@ -55,7 +55,7 @@ class ChainBackend extends BaseBackend {
            signatureFromRequest,
            accessKey,
            options,
            done
            done,
        ), callback);
    }

@@ -67,7 +67,7 @@ class ChainBackend extends BaseBackend {
            region,
            scopeDate,
            options,
            done
            done,
        ), callback);
    }

@@ -151,7 +151,7 @@ class ChainBackend extends BaseBackend {
            requestContextParams,
            userArn,
            options,
            done
            done,
        ), (err, res) => {
            if (err) {
                return callback(err);

@@ -169,8 +169,8 @@ class ChainBackend extends BaseBackend {
            client.healthcheck(reqUid, (err, res) => done(null, {
                error: !!err ? err : null,
                status: res,
            })
        ), (err, res) => {
            }),
        ), (err, res) => {
            if (err) {
                return callback(err);
            }
@@ -26,20 +26,20 @@ class AuthLoader {
             .required();

         const accountsJoi = joi.array()
             .items({
                 name: joi.string().required(),
                 email: joi.string().email().required(),
                 arn: joi.string().required(),
                 canonicalID: joi.string().required(),
                 shortid: joi.string().regex(/^[0-9]{12}$/).required(),
                 keys: this._joiKeysValidator,
                 // backward-compat
                 users: joi.array(),
             })
             .required()
             .unique('arn')
             .unique('email')
             .unique('canonicalID');
         this._joiValidator = joi.object({ accounts: accountsJoi });
     }
@@ -136,7 +136,7 @@ class AuthLoader {

     _validateData(authData, filePath) {
         const res = joi.validate(authData, this._joiValidator,
             { abortEarly: false });
         if (res.error) {
             this._dumpJoiErrors(res.error.details, filePath);
             return false;
@@ -156,7 +156,7 @@ class AuthLoader {
                 'master/conf/authdata.json). Also note that support ' +
                 'for account users has been dropped.',
                 { accountName: account.name, accountArn: account.arn,
                     filePath });
             arnError = true;
             return;
         }
@@ -167,7 +167,7 @@ class AuthLoader {
                 'https://github.com/scality/S3/blob/master/conf/' +
                 'authdata.json)',
                 { accountName: account.name, accountArn: account.arn,
                     filePath });
             arnError = true;
             return;
         }
@@ -176,8 +176,8 @@ class AuthLoader {
             this._log.error(
                 'authentication config validation error',
                 { reason: arnObj.error.description,
                     accountName: account.name, accountArn: account.arn,
                     filePath });
             arnError = true;
             return;
         }
@@ -185,8 +185,8 @@ class AuthLoader {
             this._log.error(
                 'authentication config validation error',
                 { reason: 'not an IAM account ARN',
                     accountName: account.name, accountArn: account.arn,
                     filePath });
             arnError = true;
             return;
         }
@@ -215,7 +215,7 @@ class AuthLoader {
                 logInfo.context = err.context;
             }
             this._log.error('authentication config validation error',
                 logInfo);
         });
     }
 }
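For reference, an accounts entry that satisfies the joi schema above looks like the sample below. The values are illustrative, and the exact shape of each keys entry is checked by _joiKeysValidator, which is not shown in this diff.

    {
        "accounts": [{
            "name": "bart",
            "email": "sampleaccount1@sampling.com",
            "arn": "arn:aws:iam::123456789012:root",
            "canonicalID": "79a59df900b949e55d96a1e698fbaced0123456789abcdef0123456789abcdef",
            "shortid": "123456789012",
            "keys": [{ "access": "accessKey1", "secret": "verySecretKey1" }]
        }]
    }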
@@ -41,7 +41,7 @@ function getCanonicalizedAmzHeaders(headers, clientType) {
     // Build headerString
     return amzHeaders.reduce((headerStr, current) =>
         `${headerStr}${current[0]}:${current[1]}\n`,
     '');
 }

 module.exports = getCanonicalizedAmzHeaders;
@@ -22,9 +22,9 @@ function check(request, log, data) {
     timestamp = Date.parse(timestamp);
     if (!timestamp) {
         log.debug('missing or invalid date header',
             { method: 'auth/v2/headerAuthCheck.check' });
         return { err: errors.AccessDenied.
             customizeDescription('Authentication requires a valid Date or ' +
             'x-amz-date header') };
     }
@@ -42,12 +42,12 @@ function check(request, log, data) {

     if (expirationTime > currentTime + preSignedURLExpiry) {
         log.debug('expires parameter too far in future',
             { expires: request.query.Expires });
         return { err: errors.AccessDenied };
     }
     if (currentTime > expirationTime) {
         log.debug('current time exceeds expires time',
             { expires: request.query.Expires });
         return { err: errors.RequestTimeTooSkewed };
     }
     const accessKey = data.AWSAccessKeyId;
@@ -88,14 +88,14 @@ function check(request, log, data, awsService) {
     }
     if (!timestamp) {
         log.debug('missing or invalid date header',
             { method: 'auth/v4/headerAuthCheck.check' });
         return { err: errors.AccessDenied.
             customizeDescription('Authentication requires a valid Date or ' +
             'x-amz-date header') };
     }

     const validationResult = validateCredentials(credentialsArr, timestamp,
         log);
     if (validationResult instanceof Error) {
         log.debug('credentials in improper format', { credentialsArr,
             timestamp, validationResult });
@@ -134,7 +134,7 @@ function check(request, log, data, awsService) {
     } catch (err) {
         log.debug('invalid proxy_path header', { proxyPath, err });
         return { err: errors.InvalidArgument.customizeDescription(
             'invalid proxy_path header') };
     }
 }
@@ -45,7 +45,7 @@ function check(request, log, data) {
     }

     const validationResult = validateCredentials(credential, timestamp,
         log);
     if (validationResult instanceof Error) {
         log.debug('credentials in improper format', { credential,
             timestamp, validationResult });
@@ -69,7 +69,7 @@ function check(request, log, data) {
     } catch (err) {
         log.debug('invalid proxy_path header', { proxyPath });
         return { err: errors.InvalidArgument.customizeDescription(
             'invalid proxy_path header') };
     }
 }
@@ -273,7 +273,7 @@ class V4Transform extends Transform {
                 }
                 // get next chunk
                 return callback();
-            }
+            },
         );
     }
 }
@@ -25,20 +25,20 @@ function validateCredentials(credentials, timestamp, log) {
         log.warn('accessKey provided is wrong format', { accessKey });
         return errors.InvalidArgument;
     }
     // The scope date (format YYYYMMDD) must be same date as the timestamp
     // on the request from the x-amz-date param (if queryAuthCheck)
     // or from the x-amz-date header or date header (if headerAuthCheck)
     // Format of timestamp is ISO 8601: YYYYMMDDTHHMMSSZ.
     // http://docs.aws.amazon.com/AmazonS3/latest/API/
     // sigv4-query-string-auth.html
     // http://docs.aws.amazon.com/general/latest/gr/
     // sigv4-date-handling.html

     // convert timestamp to format of scopeDate YYYYMMDD
     const timestampDate = timestamp.split('T')[0];
     if (scopeDate.length !== 8 || scopeDate !== timestampDate) {
         log.warn('scope date must be the same date as the timestamp date',
             { scopeDate, timestampDate });
         return errors.RequestTimeTooSkewed;
     }
     if (service !== 's3' && service !== 'iam' && service !== 'ring' &&
@@ -50,7 +50,7 @@ function validateCredentials(credentials, timestamp, log) {
     }
     if (requestType !== 'aws4_request') {
         log.warn('requestType contained in params is not aws4_request',
             { requestType });
         return errors.InvalidArgument;
     }
     return {};
@@ -68,7 +68,7 @@ function extractQueryParams(queryObj, log) {
     // Do not need the algorithm sent back
     if (queryObj['X-Amz-Algorithm'] !== 'AWS4-HMAC-SHA256') {
         log.warn('algorithm param incorrect',
             { algo: queryObj['X-Amz-Algorithm'] });
         return authParams;
     }
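To make the scope-date rule in the comment above concrete, here is an illustrative check; the timestamp value is made up for the example.

    // x-amz-date / Date header, ISO 8601 basic format:
    const timestamp = '20220315T104523Z';
    const timestampDate = timestamp.split('T')[0]; // '20220315'
    // A credential scope of 'AKID/20220315/us-east-1/s3/aws4_request' passes;
    // 'AKID/20220314/...' is rejected with RequestTimeTooSkewed.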
@@ -1,5 +1,6 @@
 'use strict'; // eslint-disable-line strict
-const crypto = require('crypto');
+
+import { createHash } from 'crypto';

 // The min value here is to manage further backward compat if we
 // need it
@@ -10,7 +11,7 @@ const iamSecurityTokenPattern =
     new RegExp(`^[a-f0-9]{${iamSecurityTokenSizeMin},` +
                `${iamSecurityTokenSizeMax}}$`);

-module.exports = {
+export default {
     // info about the iam security token
     iamSecurityToken: {
         min: iamSecurityTokenSizeMin,
@@ -92,7 +93,7 @@ module.exports = {
     replicationBackends: { aws_s3: true, azure: true, gcp: true },

     // hex digest of sha256 hash of empty string:
-    emptyStringHash: crypto.createHash('sha256')
+    emptyStringHash: createHash('sha256')
         .update('', 'binary').digest('hex'),
     mpuMDStoredExternallyBackend: { aws_s3: true, gcp: true },
     // AWS sets a minimum size limit for parts except for the last part.
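For reference, the constant kept by this hunk is the well-known SHA-256 digest of the empty string, which can be checked directly:

    const { createHash } = require('crypto');
    createHash('sha256').update('', 'binary').digest('hex');
    // => 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'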
 lib/db.js (182 lines removed)
@@ -1,182 +0,0 @@
-'use strict'; // eslint-disable-line strict
-
-const writeOptions = { sync: true };
-
-/**
- * Like Error, but with a property set to true.
- * TODO: this is copied from kineticlib, should consolidate with the
- * future errors module
- *
- * Example: instead of:
- *     const err = new Error("input is not a buffer");
- *     err.badTypeInput = true;
- *     throw err;
- * use:
- *     throw propError("badTypeInput", "input is not a buffer");
- *
- * @param {String} propName - the property name.
- * @param {String} message - the Error message.
- * @returns {Error} the Error object.
- */
-function propError(propName, message) {
-    const err = new Error(message);
-    err[propName] = true;
-    return err;
-}
-
-/**
- * Running transaction with multiple updates to be committed atomically
- */
-class IndexTransaction {
-    /**
-     * Builds a new transaction
-     *
-     * @argument {Leveldb} db an open database to which the updates
-     *                     will be applied
-     *
-     * @returns {IndexTransaction} a new empty transaction
-     */
-    constructor(db) {
-        this.operations = [];
-        this.db = db;
-        this.closed = false;
-        this.conditions = [];
-    }
-
-    /**
-     * Adds a new operation to participate in this running transaction
-     *
-     * @argument {object} op an object with the following attributes:
-     *                    {
-     *                      type: 'put' or 'del',
-     *                      key: the object key,
-     *                      value: (optional for del) the value to store,
-     *                    }
-     *
-     * @throws {Error} an error described by the following properties
-     *                 - invalidTransactionVerb if op is not put or del
-     *                 - pushOnCommittedTransaction if already committed
-     *                 - missingKey if the key is missing from the op
-     *                 - missingValue if putting without a value
-     *
-     * @returns {undefined}
-     */
-    push(op) {
-        if (this.closed) {
-            throw propError('pushOnCommittedTransaction',
-                'can not add ops to already committed transaction');
-        }
-
-        if (op.type !== 'put' && op.type !== 'del') {
-            throw propError('invalidTransactionVerb',
-                `unknown action type: ${op.type}`);
-        }
-
-        if (op.key === undefined) {
-            throw propError('missingKey', 'missing key');
-        }
-
-        if (op.type === 'put' && op.value === undefined) {
-            throw propError('missingValue', 'missing value');
-        }
-
-        this.operations.push(op);
-    }
-
-    /**
-     * Adds a new put operation to this running transaction
-     *
-     * @argument {string} key - the key of the object to put
-     * @argument {string} value - the value to put
-     *
-     * @throws {Error} an error described by the following properties
-     *                 - pushOnCommittedTransaction if already committed
-     *                 - missingKey if the key is missing from the op
-     *                 - missingValue if putting without a value
-     *
-     * @returns {undefined}
-     *
-     * @see push
-     */
-    put(key, value) {
-        this.push({ type: 'put', key, value });
-    }
-
-    /**
-     * Adds a new del operation to this running transaction
-     *
-     * @argument {string} key - the key of the object to delete
-     *
-     * @throws {Error} an error described by the following properties
-     *                 - pushOnCommittedTransaction if already committed
-     *                 - missingKey if the key is missing from the op
-     *
-     * @returns {undefined}
-     *
-     * @see push
-     */
-    del(key) {
-        this.push({ type: 'del', key });
-    }
-
-    /**
-     * Adds a condition for the transaction
-     *
-     * @argument {object} condition an object with the following attributes:
-     *                    {
-     *                      <condition>: the object key
-     *                    }
-     *                    example: { notExists: 'key1' }
-     *
-     * @throws {Error} an error described by the following properties
-     *                 - pushOnCommittedTransaction if already committed
-     *                 - missingCondition if the condition is empty
-     *
-     * @returns {undefined}
-     */
-    addCondition(condition) {
-        if (this.closed) {
-            throw propError('pushOnCommittedTransaction',
-                'can not add conditions to already committed transaction');
-        }
-        if (condition === undefined || Object.keys(condition).length === 0) {
-            throw propError('missingCondition', 'missing condition for conditional put');
-        }
-        if (typeof (condition.notExists) !== 'string') {
-            throw propError('unsupportedConditionalOperation', 'missing key or supported condition');
-        }
-        this.conditions.push(condition);
-    }
-
-    /**
-     * Applies the queued updates in this transaction atomically.
-     *
-     * @argument {function} cb function to be called when the commit
-     *                      finishes, taking an optional error argument
-     *
-     * @returns {undefined}
-     */
-    commit(cb) {
-        if (this.closed) {
-            return cb(propError('alreadyCommitted',
-                'transaction was already committed'));
-        }
-
-        if (this.operations.length === 0) {
-            return cb(propError('emptyTransaction',
-                'tried to commit an empty transaction'));
-        }
-
-        this.closed = true;
-        writeOptions.conditions = this.conditions;
-
-        // The array-of-operations variant of the `batch` method
-        // allows passing options such has `sync: true` whereas the
-        // chained form does not.
-        return this.db.batch(this.operations, writeOptions, cb);
-    }
-}
-
-module.exports = {
-    IndexTransaction,
-};
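For context on what callers of the removed module relied on, here is a minimal usage sketch based on the JSDoc above; the db handle and the handleError/done callbacks are placeholders for the example.

    const { IndexTransaction } = require('./lib/db');

    const transaction = new IndexTransaction(db); // db: an open LevelDB-style handle
    transaction.put('key1', 'value1');
    transaction.del('key2');
    transaction.addCondition({ notExists: 'key1' }); // conditional write
    transaction.commit(err => {
        if (err) {
            // err carries a flag such as err.emptyTransaction or
            // err.alreadyCommitted describing the failure
            return handleError(err);
        }
        return done();
    });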
@@ -1,4 +1,4 @@
-function reshapeExceptionError(error) {
+export function reshapeExceptionError(error: any) {
     const { message, code, stack, name } = error;
     return {
         message,
@@ -7,7 +7,3 @@ function reshapeExceptionError(error) {
         name,
     };
 }
-
-module.exports = {
-    reshapeExceptionError,
-};
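A short illustration of what the helper is for; the log object stands in for whatever werelogs-style logger the caller holds.

    try {
        JSON.parse('not json');
    } catch (err) {
        // Exceptions do not serialize well as-is; reshape them into a
        // plain { message, code, stack, name } object before logging.
        log.error('parsing failed', { error: reshapeExceptionError(err) });
    }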
@@ -1,11 +1,22 @@
 'use strict'; // eslint-disable-line strict

+import * as http from 'http';
+import errorsObj from '../errors/arsenalErrors.json';
+
 /**
  * ArsenalError
  *
  * @extends {Error}
  */
+
+type Errors = Record<string, ArsenalError>;
+
 class ArsenalError extends Error {
+    code: number;
+    description: string;
+    private static _errorMap: Errors;
+
     /**
      * constructor.
      *
@@ -13,7 +24,7 @@ class ArsenalError extends Error {
      * @param {number} code - HTTP status code
      * @param {string} desc - Verbose description of error
      */
-    constructor(type, code, desc) {
+    constructor(type: string, code: number, desc: string) {
         super(type);

         /**
@@ -28,14 +39,36 @@ class ArsenalError extends Error {
          */
         this.description = desc;

-        this[type] = true;
+        (this as any)[type] = true;
     }

+    public static get errorMap () {
+        if (this._errorMap !== undefined) {
+            return this._errorMap;
+        }
+
+        const errors: Errors = {};
+        type ErrorDefinition = { code: number, description: string };
+        type ErrorDefinitions = Record<string, ErrorDefinition | string>;
+
+        Object.keys(errorsObj)
+            .filter(index => index !== '_comment')
+            .forEach(index => {
+                errors[index] = new ArsenalError(
+                    index,
+                    ((errorsObj as ErrorDefinitions)[index] as ErrorDefinition).code,
+                    ((errorsObj as ErrorDefinitions)[index] as ErrorDefinition).description
+                );
+            });
+        this._errorMap = errors;
+        return this._errorMap;
+    }
+
     /**
      * Output the error as a JSON string
      * @returns {string} Error as JSON string
      */
-    toString() {
+    toString(): string {
         return JSON.stringify({
             errorType: this.message,
             errorMessage: this.description,
@@ -48,7 +81,7 @@ class ArsenalError extends Error {
      * @param { http.ServerResponse } res - Response we are responding to
      * @returns {undefined}
      */
-    writeResponse(res) {
+    writeResponse(res: http.ServerResponse): void {
         res.writeHead(this.code);
         res.end(this.toString());
     }
@@ -60,28 +93,15 @@ class ArsenalError extends Error {
      * @param {string} description - New error description
      * @returns {ArsenalError} New error
      */
-    customizeDescription(description) {
+    customizeDescription(description: string): ArsenalError {
         return new ArsenalError(this.message, this.code, description);
     }
 }

-/**
- * Generate an Errors instances object.
- *
- * @returns {Object.<string, ArsenalError>} - object field by arsenalError
- *          instances
- */
-function errorsGen() {
-    const errors = {};
-    const errorsObj = require('../errors/arsenalErrors.json');
-
-    Object.keys(errorsObj)
-        .filter(index => index !== '_comment')
-        .forEach(index => {
-            errors[index] = new ArsenalError(index, errorsObj[index].code,
-                errorsObj[index].description);
-        });
-    return errors;
-}
+const errors = ArsenalError.errorMap;

-module.exports = errorsGen();
+export type { ArsenalError };
+
+export default {
+    ...errors,
+};
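Consumption of the error map is unchanged by this migration; a typical call site looks like the sketch below, where res stands in for an http.ServerResponse the caller already has.

    const { errors } = require('arsenal');

    // Derive a more specific message without mutating the shared instance:
    const err = errors.NoSuchKey.customizeDescription(
        'The specified key does not exist in this bucket');
    // err.NoSuchKey === true, err.code === 404
    err.writeResponse(res);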
@@ -17,9 +17,9 @@ describe('decyrptSecret', () => {
 describe('parseServiceCredentials', () => {
     const conf = {
         users: [{ accessKey,
             accountType: 'service-clueso',
             secretKey,
             userName: 'Search Service Account' }],
     };
     const auth = JSON.stringify({ privateKey });
@@ -1,6 +1,6 @@
 'use strict'; // eslint-disable-line strict

-const ipaddr = require('ipaddr.js');
+import * as ipaddr from 'ipaddr.js';

 /**
  * checkIPinRangeOrMatch checks whether a given ip address is in an ip address
@@ -9,12 +9,15 @@ const ipaddr = require('ipaddr.js');
  * @param {object} ip - parsed ip address
  * @return {boolean} true if in range, false if not
  */
-function checkIPinRangeOrMatch(cidr, ip) {
+function checkIPinRangeOrMatch(
+    cidr: string, ip: ipaddr.IPv4 | ipaddr.IPv6
+): boolean {
     // If there is an exact match of the ip address, no need to check ranges
     if (ip.toString() === cidr) {
         return true;
     }
-    let range;
+    let range: [ipaddr.IPv4 | ipaddr.IPv6, number];

     try {
         range = ipaddr.IPv4.parseCIDR(cidr);
@@ -60,10 +63,17 @@ function parseIp(ip) {
  * @param {string} ip - IP address
  * @return {boolean} - true if there is match or false for no match
  */
-function ipMatchCidrList(cidrList, ip) {
-    const parsedIp = parseIp(ip);
+function ipMatchCidrList(cidrList: string[], ip: string): boolean {
+    let parsedIp: ipaddr.IPv4 | ipaddr.IPv6;
+
+    try {
+        parsedIp = parseIp(ip);
+    } catch (err) {
+        return false
+    }
+
     return cidrList.some(item => {
-        let cidr;
+        let cidr: string;
         // patch the cidr if range is not specified
         if (item.indexOf('/') === -1) {
             if (item.startsWith('127.')) {
@@ -72,6 +82,7 @@ function ipMatchCidrList(cidrList, ip) {
                 cidr = `${item}/32`;
             }
         }
+        // TODO cidr type error
         return checkIPinRangeOrMatch(cidr || item, parsedIp);
     });
 }
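Behaviour of the updated ipMatchCidrList, illustrated with made-up inputs; the last call shows the new try/catch path returning false instead of throwing on an unparsable address.

    ipMatchCidrList(['192.168.1.0/24', '10.0.0.5'], '192.168.1.42'); // true
    ipMatchCidrList(['10.0.0.5'], '10.0.0.5');                       // true (exact match)
    ipMatchCidrList(['192.168.1.0/24'], 'not-an-ip');                // false (parse failure)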
@@ -1,6 +1,8 @@
 'use strict'; // eslint-disable-line

-const debug = require('util').debuglog('jsutil');
+import { debuglog } from 'util';
+
+const debug = debuglog('jsutil');

 // JavaScript utility functions

@@ -17,9 +19,9 @@ const debug = require('util').debuglog('jsutil');
  * @return {function} a callable wrapper mirroring <tt>func</tt> but
  * only calls <tt>func</tt> at first invocation.
  */
-module.exports.once = function once(func) {
+export function once(func: Function): Function {
     const state = { called: false, res: undefined };
-    return function wrapper(...args) {
+    return function wrapper(...args: any) {
         if (!state.called) {
             state.called = true;
             state.res = func.apply(func, args);
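The semantics of once are unchanged by the export-style change; a small illustration follows, where callback is any function the caller wants protected against double invocation.

    const cbOnce = jsutil.once(callback);
    cbOnce(null, 'first result');   // forwarded to callback
    cbOnce(new Error('too late'));  // ignored, callback is not called again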
@@ -17,7 +17,7 @@ class RedisClient {
                 method: 'RedisClient.constructor',
                 redisHost: config.host,
                 redisPort: config.port,
-            })
+            }),
         );
         return this;
     }
@@ -9,7 +9,6 @@ const StatsClient = require('./StatsClient');
  * rather than by seconds
  */
 class StatsModel extends StatsClient {
-
     /**
      * Utility method to convert 2d array rows to columns, and vice versa
      * See also: https://docs.ruby-lang.org/en/2.0.0/Array.html#method-i-zip
@@ -2,8 +2,8 @@ const promClient = require('prom-client');

 const collectDefaultMetricsIntervalMs =
     process.env.COLLECT_DEFAULT_METRICS_INTERVAL_MS !== undefined ?
         Number.parseInt(process.env.COLLECT_DEFAULT_METRICS_INTERVAL_MS, 10) :
         10000;

 promClient.collectDefaultMetrics({ timeout: collectDefaultMetricsIntervalMs });
@@ -27,7 +27,7 @@ class ARN {

     static createFromString(arnStr) {
         const [arn, partition, service, region, accountId,
             resourceType, resource] = arnStr.split(':');

         if (arn !== 'arn') {
             return { error: errors.InvalidArgument.customizeDescription(
@@ -58,7 +58,7 @@ class ARN {
                 'must be a 12-digit number or "*"') };
         }
         const fullResource = (resource !== undefined ?
             `${resourceType}:${resource}` : resourceType);
         return new ARN(partition, service, region, accountId, fullResource);
     }

@@ -98,7 +98,7 @@ class ARN {

     toString() {
         return ['arn', this.getPartition(), this.getService(),
             this.getRegion(), this.getAccountId(), this.getResource()]
             .join(':');
     }
 }
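An illustrative round trip through createFromString and toString; the ARN value is an example, not taken from the diff.

    const arn = ARN.createFromString('arn:aws:iam::123456789012:user/Bob');
    // arn.getService()   === 'iam'
    // arn.getAccountId() === '123456789012'
    // arn.getResource()  === 'user/Bob'
    arn.toString(); // 'arn:aws:iam::123456789012:user/Bob'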
@ -52,9 +52,9 @@ class BackendInfo {
|
|||
*/
|
||||
static isRequestEndpointPresent(config, requestEndpoint, log) {
|
||||
if (Object.keys(config.restEndpoints).
|
||||
indexOf(requestEndpoint) < 0) {
|
||||
indexOf(requestEndpoint) < 0) {
|
||||
log.trace('requestEndpoint does not match config restEndpoints',
|
||||
{ requestEndpoint });
|
||||
{ requestEndpoint });
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
@ -70,10 +70,10 @@ class BackendInfo {
|
|||
*/
|
||||
static isRequestEndpointValueValid(config, requestEndpoint, log) {
|
||||
if (Object.keys(config.locationConstraints).
|
||||
indexOf(config.restEndpoints[requestEndpoint]) < 0) {
|
||||
indexOf(config.restEndpoints[requestEndpoint]) < 0) {
|
||||
log.trace('the default locationConstraint for request' +
|
||||
'Endpoint does not match any config locationConstraint',
|
||||
{ requestEndpoint });
|
||||
{ requestEndpoint });
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
|
@ -110,7 +110,7 @@ class BackendInfo {
|
|||
*/
|
||||
static isValidRequestEndpointOrBackend(config, requestEndpoint, log) {
|
||||
if (!BackendInfo.isRequestEndpointPresent(config, requestEndpoint,
|
||||
log)) {
|
||||
log)) {
|
||||
return BackendInfo.isMemOrFileBackend(config, log);
|
||||
}
|
||||
return BackendInfo.isRequestEndpointValueValid(config, requestEndpoint,
|
||||
|
@ -132,7 +132,7 @@ class BackendInfo {
|
|||
bucketLocationConstraint, requestEndpoint, log) {
|
||||
if (objectLocationConstraint) {
|
||||
if (BackendInfo.isValidLocationConstraint(config,
|
||||
objectLocationConstraint, log)) {
|
||||
objectLocationConstraint, log)) {
|
||||
log.trace('objectLocationConstraint is valid');
|
||||
return { isValid: true };
|
||||
}
|
||||
|
@ -143,7 +143,7 @@ class BackendInfo {
|
|||
}
|
||||
if (bucketLocationConstraint) {
|
||||
if (BackendInfo.isValidLocationConstraint(config,
|
||||
bucketLocationConstraint, log)) {
|
||||
bucketLocationConstraint, log)) {
|
||||
log.trace('bucketLocationConstraint is valid');
|
||||
return { isValid: true };
|
||||
}
|
||||
|
@ -159,7 +159,7 @@ class BackendInfo {
|
|||
return { isValid: true, legacyLocationConstraint };
|
||||
}
|
||||
if (!BackendInfo.isValidRequestEndpointOrBackend(config,
|
||||
requestEndpoint, log)) {
|
||||
requestEndpoint, log)) {
|
||||
return { isValid: false, description: 'Endpoint Location Error - ' +
|
||||
`Your endpoint "${requestEndpoint}" is not in restEndpoints ` +
|
||||
'in your config OR the default location constraint for request ' +
|
||||
|
@ -167,7 +167,7 @@ class BackendInfo {
|
|||
'match any config locationConstraint - Please update.' };
|
||||
}
|
||||
if (BackendInfo.isRequestEndpointPresent(config, requestEndpoint,
|
||||
log)) {
|
||||
log)) {
|
||||
return { isValid: true };
|
||||
}
|
||||
return { isValid: true, defaultedToDataBackend: true };
|
||||
|
|
|
@@ -69,13 +69,13 @@ class BucketInfo {
      * @param {object} [notificationConfiguration] - bucket notification configuration
      */
     constructor(name, owner, ownerDisplayName, creationDate,
         mdBucketModelVersion, acl, transient, deleted,
         serverSideEncryption, versioningConfiguration,
         locationConstraint, websiteConfiguration, cors,
         replicationConfiguration, lifecycleConfiguration,
         bucketPolicy, uid, readLocationConstraint, isNFS,
         ingestionConfig, azureInfo, objectLockEnabled,
         objectLockConfiguration, notificationConfiguration) {
         assert.strictEqual(typeof name, 'string');
         assert.strictEqual(typeof owner, 'string');
         assert.strictEqual(typeof ownerDisplayName, 'string');
@@ -94,7 +94,7 @@ class BucketInfo {
         if (serverSideEncryption) {
             assert.strictEqual(typeof serverSideEncryption, 'object');
             const { cryptoScheme, algorithm, masterKeyId,
                 configuredMasterKeyId, mandatory } = serverSideEncryption;
             assert.strictEqual(typeof cryptoScheme, 'number');
             assert.strictEqual(typeof algorithm, 'string');
             assert.strictEqual(typeof masterKeyId, 'string');
@ -379,7 +379,7 @@ class LifecycleConfiguration {
|
|||
if (!tags[i].Key || !tags[i].Value) {
|
||||
tagObj.error =
|
||||
errors.MissingRequiredParameter.customizeDescription(
|
||||
'Tag XML does not contain both Key and Value');
|
||||
'Tag XML does not contain both Key and Value');
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -927,7 +927,7 @@ class LifecycleConfiguration {
|
|||
const daysInt = parseInt(subExp.Days[0], 10);
|
||||
if (daysInt < 1) {
|
||||
expObj.error = errors.InvalidArgument.customizeDescription(
|
||||
'Expiration days is not a positive integer');
|
||||
'Expiration days is not a positive integer');
|
||||
} else {
|
||||
expObj.days = daysInt;
|
||||
}
|
||||
|
@ -1123,10 +1123,10 @@ class LifecycleConfiguration {
|
|||
const { noncurrentDays, storageClass } = transition;
|
||||
xml.push(
|
||||
`<${actionName}>`,
|
||||
`<NoncurrentDays>${noncurrentDays}` +
|
||||
`<NoncurrentDays>${noncurrentDays}` +
|
||||
'</NoncurrentDays>',
|
||||
`<StorageClass>${storageClass}</StorageClass>`,
|
||||
`</${actionName}>`
|
||||
`<StorageClass>${storageClass}</StorageClass>`,
|
||||
`</${actionName}>`,
|
||||
);
|
||||
});
|
||||
Action = xml.join('');
|
||||
|
@ -1144,9 +1144,9 @@ class LifecycleConfiguration {
|
|||
}
|
||||
xml.push(
|
||||
`<${actionName}>`,
|
||||
element,
|
||||
`<StorageClass>${storageClass}</StorageClass>`,
|
||||
`</${actionName}>`
|
||||
element,
|
||||
`<StorageClass>${storageClass}</StorageClass>`,
|
||||
`</${actionName}>`,
|
||||
);
|
||||
});
|
||||
Action = xml.join('');
|
||||
|
|
|
@@ -27,7 +27,7 @@ const errors = require('../errors');
  * </NotificationConfiguration>
  */

 /**
  * Format of config:
  *
  * config = {
@@ -17,7 +17,7 @@ const errors = require('../errors');
  * </ObjectLockConfiguration>
  */

 /**
  * Format of config:
  *
  * config = {
@@ -10,7 +10,6 @@ const ObjectMDLocation = require('./ObjectMDLocation');
  * mpuPart metadata for example)
  */
 class ObjectMD {
-
     /**
      * Create a new instance of ObjectMD. Parameter <tt>objMd</tt> is
      * reserved for internal use, users should call
@@ -148,7 +147,7 @@ class ObjectMD {

         Object.assign(this._data, objMd._data);
         Object.assign(this._data.replicationInfo,
             objMd._data.replicationInfo);
     }

     _updateFromParsedJSON(objMd) {
@@ -3,7 +3,6 @@
  * 'location' array
  */
 class ObjectMDLocation {
-
     /**
      * @constructor
      * @param {object} locationObj - single data location info
@@ -111,7 +111,7 @@ class RoundRobin {
     pickHost() {
         if (this.logger) {
             this.logger.debug('pick host',
                 { host: this.getCurrentHost() });
         }
         const curHost = this.getCurrentHost();
         ++this.pickCount;
@@ -163,7 +163,7 @@ class RoundRobin {
         }
         if (this.logger) {
             this.logger.debug('round robin host',
                 { newHost: this.getCurrentHost() });
         }
     }
 }
@@ -10,7 +10,6 @@ const { checkSupportIPv6 } = require('./utils');


 class Server {
-
     /**
      * @constructor
      *
@@ -431,16 +430,16 @@ class Server {
             // Setting no delay of the socket to the value configured
             sock.setNoDelay(this.isNoDelay());
             sock.on('error', err => this._logger.info(
                 'socket error - request rejected', { error: err }));
         });
         this._server.on('tlsClientError', (err, sock) =>
             this._onClientError(err, sock));
         this._server.on('clientError', (err, sock) =>
             this._onClientError(err, sock));
         this._server.on('checkContinue', (req, res) =>
             this._onCheckContinue(req, res));
         this._server.on('checkExpectation', (req, res) =>
             this._onCheckExpectation(req, res));
         this._server.on('listening', () => this._onListening());
     }
     this._server.listen(this._port, this._address);
@@ -72,8 +72,8 @@ function getByteRangeFromSpec(rangeSpec, objectSize) {
     if (rangeSpec.start < objectSize) {
         // test is false if end is undefined
         return { range: [rangeSpec.start,
             (rangeSpec.end < objectSize ?
                 rangeSpec.end : objectSize - 1)] };
     }
     return { error: errors.InvalidRange };
 }
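The clamping behaviour of the branch above, for a hypothetical 1000-byte object; other branches of the function are outside this hunk.

    getByteRangeFromSpec({ start: 0, end: 499 }, 1000);    // { range: [0, 499] }
    getByteRangeFromSpec({ start: 500, end: 2000 }, 1000); // { range: [500, 999] }, end clamped
    getByteRangeFromSpec({ start: 1500 }, 1000);           // { error: errors.InvalidRange }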
@ -90,8 +90,8 @@ function _negotiateProtocolVersion(client, logger, cb) {
|
|||
if (err) {
|
||||
const error = _arsenalError(err);
|
||||
logger.error('KMIP::negotiateProtocolVersion',
|
||||
{ error,
|
||||
vendorIdentification: client.vendorIdentification });
|
||||
{ error,
|
||||
vendorIdentification: client.vendorIdentification });
|
||||
return cb(error);
|
||||
}
|
||||
const majorVersions =
|
||||
|
@ -102,8 +102,8 @@ function _negotiateProtocolVersion(client, logger, cb) {
|
|||
majorVersions.length !== minorVersions.length) {
|
||||
const error = _arsenalError('No suitable protocol version');
|
||||
logger.error('KMIP::negotiateProtocolVersion',
|
||||
{ error,
|
||||
vendorIdentification: client.vendorIdentification });
|
||||
{ error,
|
||||
vendorIdentification: client.vendorIdentification });
|
||||
return cb(error);
|
||||
}
|
||||
client.kmip.changeProtocolVersion(majorVersions[0], minorVersions[0]);
|
||||
|
@ -126,8 +126,8 @@ function _mapExtensions(client, logger, cb) {
|
|||
if (err) {
|
||||
const error = _arsenalError(err);
|
||||
logger.error('KMIP::mapExtensions',
|
||||
{ error,
|
||||
vendorIdentification: client.vendorIdentification });
|
||||
{ error,
|
||||
vendorIdentification: client.vendorIdentification });
|
||||
return cb(error);
|
||||
}
|
||||
const extensionNames = response.lookup(searchFilter.extensionName);
|
||||
|
@ -135,8 +135,8 @@ function _mapExtensions(client, logger, cb) {
|
|||
if (extensionNames.length !== extensionTags.length) {
|
||||
const error = _arsenalError('Inconsistent extension list');
|
||||
logger.error('KMIP::mapExtensions',
|
||||
{ error,
|
||||
vendorIdentification: client.vendorIdentification });
|
||||
{ error,
|
||||
vendorIdentification: client.vendorIdentification });
|
||||
return cb(error);
|
||||
}
|
||||
extensionNames.forEach((extensionName, idx) => {
|
||||
|
@ -160,7 +160,7 @@ function _queryServerInformation(client, logger, cb) {
|
|||
if (err) {
|
||||
const error = _arsenalError(err);
|
||||
logger.warn('KMIP::queryServerInformation',
|
||||
{ error });
|
||||
{ error });
|
||||
/* no error returned, caller can keep going */
|
||||
return cb();
|
||||
}
|
||||
|
@ -170,9 +170,9 @@ function _queryServerInformation(client, logger, cb) {
|
|||
JSON.stringify(response.lookup(searchFilter.serverInformation)[0]));
|
||||
|
||||
logger.info('KMIP Server identified',
|
||||
{ vendorIdentification: client.vendorIdentification,
|
||||
serverInformation: client.serverInformation,
|
||||
negotiatedProtocolVersion: client.kmip.protocolVersion });
|
||||
{ vendorIdentification: client.vendorIdentification,
|
||||
serverInformation: client.serverInformation,
|
||||
negotiatedProtocolVersion: client.kmip.protocolVersion });
|
||||
return cb();
|
||||
});
|
||||
}
|
||||
|
@ -196,8 +196,8 @@ function _queryOperationsAndObjects(client, logger, cb) {
|
|||
if (err) {
|
||||
const error = _arsenalError(err);
|
||||
logger.error('KMIP::queryOperationsAndObjects',
|
||||
{ error,
|
||||
vendorIdentification: client.vendorIdentification });
|
||||
{ error,
|
||||
vendorIdentification: client.vendorIdentification });
|
||||
return cb(error);
|
||||
}
|
||||
const supportedOperations = response.lookup(searchFilter.operation);
|
||||
|
@ -222,15 +222,15 @@ function _queryOperationsAndObjects(client, logger, cb) {
|
|||
logger.warn('KMIP::queryOperationsAndObjects: ' +
|
||||
'The KMIP Server announces that it ' +
|
||||
'does not support all of the required features',
|
||||
{ vendorIdentification: client.vendorIdentification,
|
||||
serverInformation: client.serverInformation,
|
||||
supportsEncrypt, supportsDecrypt,
|
||||
supportsActivate, supportsRevoke,
|
||||
supportsCreate, supportsDestroy,
|
||||
supportsQuery, supportsSymmetricKeys });
|
||||
{ vendorIdentification: client.vendorIdentification,
|
||||
serverInformation: client.serverInformation,
|
||||
supportsEncrypt, supportsDecrypt,
|
||||
supportsActivate, supportsRevoke,
|
||||
supportsCreate, supportsDestroy,
|
||||
supportsQuery, supportsSymmetricKeys });
|
||||
} else {
|
||||
logger.info('KMIP Server provides the necessary feature set',
|
||||
{ vendorIdentification: client.vendorIdentification });
|
||||
{ vendorIdentification: client.vendorIdentification });
|
||||
}
|
||||
return cb();
|
||||
});
|
||||
|
@ -264,8 +264,8 @@ class Client {
|
|||
this.vendorIdentification = '';
|
||||
this.serverInformation = [];
|
||||
this.kmip = new KMIP(CodecClass || TTLVCodec,
|
||||
TransportClass || TlsTransport,
|
||||
options);
|
||||
TransportClass || TlsTransport,
|
||||
options);
|
||||
this.kmip.registerHandshakeFunction((logger, cb) => {
|
||||
this._kmipHandshake(logger, cb);
|
||||
});
|
||||
|
@ -322,8 +322,8 @@ class Client {
|
|||
if (err) {
|
||||
const error = _arsenalError(err);
|
||||
logger.error('KMIP::_activateBucketKey',
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
return cb(error);
|
||||
}
|
||||
const uniqueIdentifier =
|
||||
|
@ -332,7 +332,7 @@ class Client {
|
|||
const error = _arsenalError(
|
||||
'Server did not return the expected identifier');
|
||||
logger.error('KMIP::cipherDataKey',
|
||||
{ error, uniqueIdentifier });
|
||||
{ error, uniqueIdentifier });
|
||||
return cb(error);
|
||||
}
|
||||
return cb(null, keyIdentifier);
|
||||
|
@ -351,20 +351,20 @@ class Client {
|
|||
const attributes = [];
|
||||
if (!!this.options.bucketNameAttributeName) {
|
||||
attributes.push(KMIP.Attribute('TextString',
|
||||
this.options.bucketNameAttributeName,
|
||||
bucketName));
|
||||
this.options.bucketNameAttributeName,
|
||||
bucketName));
|
||||
}
|
||||
attributes.push(...[
|
||||
KMIP.Attribute('Enumeration', 'Cryptographic Algorithm',
|
||||
CRYPTOGRAPHIC_ALGORITHM),
|
||||
CRYPTOGRAPHIC_ALGORITHM),
|
||||
KMIP.Attribute('Integer', 'Cryptographic Length',
|
||||
CRYPTOGRAPHIC_LENGTH),
|
||||
CRYPTOGRAPHIC_LENGTH),
|
||||
KMIP.Attribute('Integer', 'Cryptographic Usage Mask',
|
||||
this.kmip.encodeMask('Cryptographic Usage Mask',
|
||||
CRYPTOGRAPHIC_USAGE_MASK))]);
|
||||
this.kmip.encodeMask('Cryptographic Usage Mask',
|
||||
CRYPTOGRAPHIC_USAGE_MASK))]);
|
||||
if (this.options.compoundCreateActivate) {
|
||||
attributes.push(KMIP.Attribute('Date-Time', 'Activation Date',
|
||||
new Date(Date.UTC())));
|
||||
new Date(Date.UTC())));
|
||||
}
|
||||
|
||||
return this.kmip.request(logger, 'Create', [
|
||||
|
@ -374,8 +374,8 @@ class Client {
|
|||
if (err) {
|
||||
const error = _arsenalError(err);
|
||||
logger.error('KMIP::createBucketKey',
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
return cb(error);
|
||||
}
|
||||
const createdObjectType =
|
||||
|
@ -386,7 +386,7 @@ class Client {
|
|||
const error = _arsenalError(
|
||||
'Server created an object of wrong type');
|
||||
logger.error('KMIP::createBucketKey',
|
||||
{ error, createdObjectType });
|
||||
{ error, createdObjectType });
|
||||
return cb(error);
|
||||
}
|
||||
if (!this.options.compoundCreateActivate) {
|
||||
|
@ -411,16 +411,16 @@ class Client {
|
|||
KMIP.TextString('Unique Identifier', bucketKeyId),
|
||||
KMIP.Structure('Revocation Reason', [
|
||||
KMIP.Enumeration('Revocation Reason Code',
|
||||
'Cessation of Operation'),
|
||||
'Cessation of Operation'),
|
||||
KMIP.TextString('Revocation Message',
|
||||
'About to be deleted'),
|
||||
'About to be deleted'),
|
||||
]),
|
||||
], (err, response) => {
|
||||
if (err) {
|
||||
const error = _arsenalError(err);
|
||||
logger.error('KMIP::_revokeBucketKey',
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
return cb(error);
|
||||
}
|
||||
const uniqueIdentifier =
|
||||
|
@ -429,7 +429,7 @@ class Client {
|
|||
const error = _arsenalError(
|
||||
'Server did not return the expected identifier');
|
||||
logger.error('KMIP::_revokeBucketKey',
|
||||
{ error, uniqueIdentifier });
|
||||
{ error, uniqueIdentifier });
|
||||
return cb(error);
|
||||
}
|
||||
return cb();
|
||||
|
@ -448,8 +448,8 @@ class Client {
|
|||
if (err) {
|
||||
const error = _arsenalError(err);
|
||||
logger.error('KMIP::destroyBucketKey: revocation failed',
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
return cb(error);
|
||||
}
|
||||
return this.kmip.request(logger, 'Destroy', [
|
||||
|
@ -458,8 +458,8 @@ class Client {
|
|||
if (err) {
|
||||
const error = _arsenalError(err);
|
||||
logger.error('KMIP::destroyBucketKey',
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
return cb(error);
|
||||
}
|
||||
const uniqueIdentifier =
|
||||
|
@ -468,7 +468,7 @@ class Client {
|
|||
const error = _arsenalError(
|
||||
'Server did not return the expected identifier');
|
||||
logger.error('KMIP::destroyBucketKey',
|
||||
{ error, uniqueIdentifier });
|
||||
{ error, uniqueIdentifier });
|
||||
return cb(error);
|
||||
}
|
||||
return cb();
|
||||
|
@ -487,19 +487,19 @@ class Client {
|
|||
* @callback called with (err, cipheredDataKey: Buffer)
|
||||
*/
|
||||
cipherDataKey(cryptoScheme,
|
||||
masterKeyId,
|
||||
plainTextDataKey,
|
||||
logger,
|
||||
cb) {
|
||||
masterKeyId,
|
||||
plainTextDataKey,
|
||||
logger,
|
||||
cb) {
|
||||
return this.kmip.request(logger, 'Encrypt', [
|
||||
KMIP.TextString('Unique Identifier', masterKeyId),
|
||||
KMIP.Structure('Cryptographic Parameters', [
|
||||
KMIP.Enumeration('Block Cipher Mode',
|
||||
CRYPTOGRAPHIC_CIPHER_MODE),
|
||||
CRYPTOGRAPHIC_CIPHER_MODE),
|
||||
KMIP.Enumeration('Padding Method',
|
||||
CRYPTOGRAPHIC_PADDING_METHOD),
|
||||
CRYPTOGRAPHIC_PADDING_METHOD),
|
||||
KMIP.Enumeration('Cryptographic Algorithm',
|
||||
CRYPTOGRAPHIC_ALGORITHM),
|
||||
CRYPTOGRAPHIC_ALGORITHM),
|
||||
]),
|
||||
KMIP.ByteString('Data', plainTextDataKey),
|
||||
KMIP.ByteString('IV/Counter/Nonce', CRYPTOGRAPHIC_DEFAULT_IV),
|
||||
|
@ -507,8 +507,8 @@ class Client {
|
|||
if (err) {
|
||||
const error = _arsenalError(err);
|
||||
logger.error('KMIP::cipherDataKey',
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
return cb(error);
|
||||
}
|
||||
const uniqueIdentifier =
|
||||
|
@ -518,7 +518,7 @@ class Client {
|
|||
const error = _arsenalError(
|
||||
'Server did not return the expected identifier');
|
||||
logger.error('KMIP::cipherDataKey',
|
||||
{ error, uniqueIdentifier });
|
||||
{ error, uniqueIdentifier });
|
||||
return cb(error);
|
||||
}
|
||||
return cb(null, data);
|
||||
|
@ -536,19 +536,19 @@ class Client {
|
|||
* @callback called with (err, plainTextDataKey: Buffer)
|
||||
*/
|
||||
decipherDataKey(cryptoScheme,
|
||||
masterKeyId,
|
||||
cipheredDataKey,
|
||||
logger,
|
||||
cb) {
|
||||
masterKeyId,
|
||||
cipheredDataKey,
|
||||
logger,
|
||||
cb) {
|
||||
return this.kmip.request(logger, 'Decrypt', [
|
||||
KMIP.TextString('Unique Identifier', masterKeyId),
|
||||
KMIP.Structure('Cryptographic Parameters', [
|
||||
KMIP.Enumeration('Block Cipher Mode',
|
||||
CRYPTOGRAPHIC_CIPHER_MODE),
|
||||
CRYPTOGRAPHIC_CIPHER_MODE),
|
||||
KMIP.Enumeration('Padding Method',
|
||||
CRYPTOGRAPHIC_PADDING_METHOD),
|
||||
CRYPTOGRAPHIC_PADDING_METHOD),
|
||||
KMIP.Enumeration('Cryptographic Algorithm',
|
||||
CRYPTOGRAPHIC_ALGORITHM),
|
||||
CRYPTOGRAPHIC_ALGORITHM),
|
||||
]),
|
||||
KMIP.ByteString('Data', cipheredDataKey),
|
||||
KMIP.ByteString('IV/Counter/Nonce', CRYPTOGRAPHIC_DEFAULT_IV),
|
||||
|
@ -556,8 +556,8 @@ class Client {
|
|||
if (err) {
|
||||
const error = _arsenalError(err);
|
||||
logger.error('KMIP::decipherDataKey',
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
{ error,
|
||||
serverInformation: this.serverInformation });
|
||||
return cb(error);
|
||||
}
|
||||
const uniqueIdentifier =
|
||||
|
@ -567,7 +567,7 @@ class Client {
|
|||
const error = _arsenalError(
|
||||
'Server did not return the right identifier');
|
||||
logger.error('KMIP::decipherDataKey',
|
||||
{ error, uniqueIdentifier });
|
||||
{ error, uniqueIdentifier });
|
||||
return cb(error);
|
||||
}
|
||||
return cb(null, data);
|
||||
|
|
|
@ -55,15 +55,15 @@ function TTLVCodec() {
|
|||
const property = {};
|
||||
if (!TypeDecoder[elementType]) {
|
||||
_throwError(logger,
|
||||
'Unknown element type',
|
||||
{ funcName, elementTag, elementType });
|
||||
'Unknown element type',
|
||||
{ funcName, elementTag, elementType });
|
||||
}
|
||||
const elementValue = value.slice(i + 8,
|
||||
i + 8 + elementLength);
|
||||
i + 8 + elementLength);
|
||||
if (elementValue.length !== elementLength) {
|
||||
_throwError(logger, 'BUG: Wrong buffer size',
|
||||
{ funcName, elementLength,
|
||||
bufferLength: elementValue.length });
|
||||
{ funcName, elementLength,
|
||||
bufferLength: elementValue.length });
|
||||
}
|
||||
property.type = TypeDecoder[elementType].name;
|
||||
property.value = TypeDecoder[elementType]
|
||||
|
@ -75,7 +75,7 @@ function TTLVCodec() {
|
|||
const tagInfo = TagDecoder[elementTag];
|
||||
if (!tagInfo) {
|
||||
logger.debug('Unknown element tag',
|
||||
{ funcName, elementTag });
|
||||
{ funcName, elementTag });
|
||||
property.tag = elementTag;
|
||||
element['Unknown Tag'] = property;
|
||||
} else {
|
||||
|
@ -83,8 +83,8 @@ function TTLVCodec() {
|
|||
if (tagInfo.name === 'Attribute Name') {
|
||||
if (property.type !== 'TextString') {
|
||||
_throwError(logger,
|
||||
'Invalide type',
|
||||
{ funcName, type: property.type });
|
||||
'Invalide type',
|
||||
{ funcName, type: property.type });
|
||||
}
|
||||
diversion = property.value;
|
||||
}
|
||||
|
@ -114,8 +114,8 @@ function TTLVCodec() {
|
|||
}
|
||||
const itemResult =
|
||||
TypeEncoder[itemType].encode(itemTagName,
|
||||
itemValue,
|
||||
itemDiversion);
|
||||
itemValue,
|
||||
itemDiversion);
|
||||
encodedValue = encodedValue
|
||||
.concat(_ttlvPadVector(itemResult));
|
||||
});
|
||||
|
@ -133,9 +133,9 @@ function TTLVCodec() {
|
|||
const fixedLength = 4;
|
||||
if (fixedLength !== value.length) {
|
||||
_throwError(logger,
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
}
|
||||
return value.readUInt32BE(0);
|
||||
},
|
||||
|
@ -156,16 +156,16 @@ function TTLVCodec() {
|
|||
const fixedLength = 8;
|
||||
if (fixedLength !== value.length) {
|
||||
_throwError(logger,
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
}
|
||||
const longUInt = UINT32_MAX * value.readUInt32BE(0) +
|
||||
value.readUInt32BE(4);
|
||||
if (longUInt > Number.MAX_SAFE_INTEGER) {
|
||||
_throwError(logger,
|
||||
'53-bit overflow',
|
||||
{ funcName, longUInt });
|
||||
'53-bit overflow',
|
||||
{ funcName, longUInt });
|
||||
}
|
||||
return longUInt;
|
||||
},
|
||||
|
@ -200,9 +200,9 @@ function TTLVCodec() {
|
|||
const fixedLength = 4;
|
||||
if (fixedLength !== value.length) {
|
||||
_throwError(logger,
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
}
|
||||
const enumValue = value.toString('hex');
|
||||
const actualTag = diversion ? TagEncoder[diversion].value : tag;
|
||||
|
@ -211,10 +211,10 @@ function TTLVCodec() {
|
|||
!enumInfo.enumeration ||
|
||||
!enumInfo.enumeration[enumValue]) {
|
||||
return { tag,
|
||||
value: enumValue,
|
||||
message: 'Unknown enumeration value',
|
||||
diversion,
|
||||
};
|
||||
value: enumValue,
|
||||
message: 'Unknown enumeration value',
|
||||
diversion,
|
||||
};
|
||||
}
|
||||
return enumInfo.enumeration[enumValue];
|
||||
},
|
||||
|
@ -227,7 +227,7 @@ function TTLVCodec() {
|
|||
const actualTag = diversion || tagName;
|
||||
const encodedValue =
|
||||
Buffer.from(TagEncoder[actualTag].enumeration[value],
|
||||
'hex');
|
||||
'hex');
|
||||
return _ttlvPadVector([tag, type, length, encodedValue]);
|
||||
},
|
||||
},
|
||||
|
@ -238,9 +238,9 @@ function TTLVCodec() {
|
|||
const fixedLength = 8;
|
||||
if (fixedLength !== value.length) {
|
||||
_throwError(logger,
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
}
|
||||
const msUInt = value.readUInt32BE(0);
|
||||
const lsUInt = value.readUInt32BE(4);
|
||||
|
@ -267,7 +267,7 @@ function TTLVCodec() {
|
|||
const length = Buffer.alloc(4);
|
||||
length.writeUInt32BE(value.length);
|
||||
return _ttlvPadVector([tag, type, length,
|
||||
Buffer.from(value, 'utf8')]);
|
||||
Buffer.from(value, 'utf8')]);
|
||||
},
|
||||
},
|
||||
'08': {
|
||||
|
@ -289,17 +289,17 @@ function TTLVCodec() {
|
|||
const fixedLength = 8;
|
||||
if (fixedLength !== value.length) {
|
||||
_throwError(logger,
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
}
|
||||
const d = new Date(0);
|
||||
const utcSeconds = UINT32_MAX * value.readUInt32BE(0) +
|
||||
value.readUInt32BE(4);
|
||||
if (utcSeconds > Number.MAX_SAFE_INTEGER) {
|
||||
_throwError(logger,
|
||||
'53-bit overflow',
|
||||
{ funcName, utcSeconds });
|
||||
'53-bit overflow',
|
||||
{ funcName, utcSeconds });
|
||||
}
|
||||
d.setUTCSeconds(utcSeconds);
|
||||
return d;
|
||||
|
@ -323,9 +323,9 @@ function TTLVCodec() {
|
|||
const fixedLength = 4;
|
||||
if (fixedLength !== value.length) {
|
||||
_throwError(logger,
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
'Length mismatch',
|
||||
{ funcName, fixedLength,
|
||||
bufferLength: value.length });
|
||||
}
|
||||
return value.readInt32BE(0);
|
||||
},
|
||||
|
@ -415,8 +415,8 @@ function TTLVCodec() {
|
|||
throw Error(`Unknown Type '${type}'`);
|
||||
}
|
||||
const itemValue = TypeEncoder[type].encode(key,
|
||||
item[key].value,
|
||||
item[key].diversion);
|
||||
item[key].value,
|
||||
item[key].diversion);
|
||||
result = result.concat(_ttlvPadVector(itemValue));
|
||||
});
|
||||
});
|
||||
|
|
|
@ -275,11 +275,11 @@ class KMIP {
|
|||
KMIP.Structure('Request Header', [
|
||||
KMIP.Structure('Protocol Version', [
|
||||
KMIP.Integer('Protocol Version Major',
|
||||
this.protocolVersion.major),
|
||||
this.protocolVersion.major),
|
||||
KMIP.Integer('Protocol Version Minor',
|
||||
this.protocolVersion.minor)]),
|
||||
this.protocolVersion.minor)]),
|
||||
KMIP.Integer('Maximum Response Size',
|
||||
this.maximumResponseSize),
|
||||
this.maximumResponseSize),
|
||||
KMIP.Integer('Batch Count', 1)]),
|
||||
KMIP.Structure('Batch Item', [
|
||||
KMIP.Enumeration('Operation', operation),
|
||||
|
@ -292,7 +292,7 @@ class KMIP {
|
|||
(err, conversation, rawResponse) => {
|
||||
if (err) {
|
||||
logger.error('KMIP::request: Failed to send message',
|
||||
{ error: err });
|
||||
{ error: err });
|
||||
return cb(err);
|
||||
}
|
||||
const response = this._decodeMessage(logger, rawResponse);
|
||||
|
@ -311,16 +311,16 @@ class KMIP {
|
|||
this.transport.abortPipeline(conversation);
|
||||
const error = Error('Invalid batch item ID returned');
|
||||
logger.error('KMIP::request: failed',
|
||||
{ resultUniqueBatchItemID, uuid, error });
|
||||
{ resultUniqueBatchItemID, uuid, error });
|
||||
return cb(error);
|
||||
}
|
||||
if (performedOperation !== operation) {
|
||||
this.transport.abortPipeline(conversation);
|
||||
const error = Error('Operation mismatch',
|
||||
{ got: performedOperation,
|
||||
expected: operation });
|
||||
{ got: performedOperation,
|
||||
expected: operation });
|
||||
logger.error('KMIP::request: Operation mismatch',
|
||||
{ error });
|
||||
{ error });
|
||||
return cb(error);
|
||||
}
|
||||
if (resultStatus !== 'Success') {
|
||||
|
@ -331,19 +331,17 @@ class KMIP {
|
|||
response.lookup(
|
||||
'Response Message/Batch Item/Result Message')[0];
|
||||
const error = Error('KMIP request failure',
|
||||
{ resultStatus,
|
||||
resultReason,
|
||||
resultMessage });
|
||||
{ resultStatus,
|
||||
resultReason,
|
||||
resultMessage });
|
||||
logger.error('KMIP::request: request failed',
|
||||
{ error, resultStatus,
|
||||
resultReason, resultMessage });
|
||||
{ error, resultStatus,
|
||||
resultReason, resultMessage });
|
||||
return cb(error);
|
||||
}
|
||||
return cb(null, response);
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@@ -86,8 +86,8 @@ class TransportTemplate {
             const deferedRequest = this.deferedRequests.shift();
             process.nextTick(() => {
                 this.send(logger,
                     deferedRequest.encodedMessage,
                     deferedRequest.cb);
             });
         } else if (this.callbackPipeline.length === 0 &&
                    this.deferedRequests.length === 0 &&
@@ -26,7 +26,7 @@ function sendError(res, log, error, optMessage) {
             httpCode: error.code,
             errorType: error.message,
             error: message,
-        }
+        },
     );
     res.writeHead(error.code);
     res.end(JSON.stringify({
@@ -19,7 +19,7 @@ function setContentRange(response, byteRange, objectSize) {
     const [start, end] = byteRange;
     assert(start !== undefined && end !== undefined);
     response.setHeader('Content-Range',
         `bytes ${start}-${end}/${objectSize}`);
 }

 function sendError(res, log, error, optMessage) {
@@ -45,7 +45,6 @@ function sendError(res, log, error, optMessage) {
  * start() to start listening to the configured port.
  */
 class RESTServer extends httpServer {
-
     /**
      * @constructor
      * @param {Object} params - constructor params
@@ -227,7 +226,7 @@ class RESTServer extends httpServer {
             return sendError(res, log, err);
         }
         log.debug('sending back 200/206 response with contents',
             { key: pathInfo.key });
         setContentLength(res, contentLength);
         res.setHeader('Accept-Ranges', 'bytes');
         if (byteRange) {
@@ -265,7 +264,7 @@ class RESTServer extends httpServer {
             return sendError(res, log, err);
         }
         log.debug('sending back 204 response to DELETE',
             { key: pathInfo.key });
         res.writeHead(204);
         return res.end(() => {
             log.debug('DELETE response sent', { key: pathInfo.key });
@@ -19,7 +19,7 @@ function explodePath(path) {
        return {
            service: pathMatch[1],
            key: (pathMatch[3] !== undefined && pathMatch[3].length > 0 ?
                pathMatch[3] : undefined),
        };
    }
    throw errors.InvalidURI.customizeDescription('malformed URI');
@@ -17,7 +17,6 @@ const rpc = require('./rpc.js');
 * RPC client object accessing the sub-level transparently.
 */
class LevelDbClient extends rpc.BaseClient {

    /**
     * @constructor
     *
@@ -78,7 +77,6 @@ class LevelDbClient extends rpc.BaseClient {
 * env.subDb (env is passed as first parameter of received RPC calls).
 */
class LevelDbService extends rpc.BaseService {

    /**
     * @constructor
     *
@@ -37,7 +37,6 @@ let streamRPCJSONObj;
 * an error occurred).
 */
class BaseClient extends EventEmitter {

    /**
     * @constructor
     *
@@ -54,7 +53,7 @@ class BaseClient extends EventEmitter {
     */
    constructor(params) {
        const { url, logger, callTimeoutMs,
            streamMaxPendingAck, streamAckTimeoutMs } = params;
        assert(url);
        assert(logger);

@@ -82,11 +81,11 @@ class BaseClient extends EventEmitter {
    _call(remoteCall, args, cb) {
        const wrapCb = (err, data) => {
            cb(reconstructError(err),
                this.socketStreams.decodeStreams(data));
        };
        this.logger.debug('remote call', { remoteCall, args });
        this.socket.emit('call', remoteCall,
            this.socketStreams.encodeStreams(args), wrapCb);
        return undefined;
    }

@@ -113,8 +112,8 @@ class BaseClient extends EventEmitter {
            throw new Error(`argument cb=${cb} is not a callback`);
        }
        async.timeout(this._call.bind(this), timeoutMs,
            `operation ${remoteCall} timed out`)(remoteCall,
            args, cb);
        return undefined;
    }

@@ -142,7 +141,7 @@ class BaseClient extends EventEmitter {
        const url = this.url;
        this.socket.on('error', err => {
            this.logger.warn('connectivity error to the RPC service',
                { url, error: err });
        });
        this.socket.on('connect', () => {
            this.emit('connect');
@@ -156,7 +155,7 @@ class BaseClient extends EventEmitter {
        this.getManifest((err, manifest) => {
            if (err) {
                this.logger.error('Error fetching manifest from RPC server',
                    { error: err });
            } else {
                manifest.api.forEach(apiItem => {
                    this.createCall(apiItem.name);
@@ -251,7 +250,6 @@ class BaseClient extends EventEmitter {
 *
 */
class BaseService {

    /**
     * @constructor
     *
@@ -497,7 +495,7 @@ function RPCServer(params) {

        conn.on('error', err => {
            log.error('error on socket.io connection',
                { namespace: service.namespace, error: err });
        });
        conn.on('call', (remoteCall, args, cb) => {
            const decodedArgs = streamsSocket.decodeStreams(args);
@@ -647,8 +645,8 @@ streamRPCJSONObj = function _streamRPCJSONObj(obj, wstream, cb) {
    // primitive types
    if (obj === undefined) {
        wstream.write('null'); // if undefined elements are present in
                               // arrays, convert them to JSON null
                               // objects
    } else {
        wstream.write(JSON.stringify(obj));
    }
@@ -16,7 +16,7 @@ class SIOOutputStream extends stream.Writable {
    constructor(socket, streamId, maxPendingAck, ackTimeoutMs) {
        super({ objectMode: true });
        this._initOutputStream(socket, streamId, maxPendingAck,
            ackTimeoutMs);
    }

    _initOutputStream(socket, streamId, maxPendingAck, ackTimeoutMs) {
@@ -194,7 +194,7 @@ class SIOStreamSocket {
        this.socket.on('stream-data', (payload, cb) => {
            const { streamId, data } = payload;
            log.debug('received \'stream-data\' event',
                { streamId, size: data.length });
            const stream = this.remoteStreams[streamId];
            if (!stream) {
                log.debug('no such remote stream registered', { streamId });
@@ -280,15 +280,15 @@ class SIOStreamSocket {
            let transportStream;
            if (isReadStream) {
                transportStream = new SIOOutputStream(this, streamId,
                    this.maxPendingAck,
                    this.ackTimeoutMs);
            } else {
                transportStream = new SIOInputStream(this, streamId);
            }
            this.localStreams[streamId] = arg;
            arg.once('close', () => {
                log.debug('stream closed, removing from local streams',
                    { streamId });
                delete this.localStreams[streamId];
            });
            arg.on('error', error => {
@@ -350,8 +350,8 @@ class SIOStreamSocket {
                stream = new SIOInputStream(this, streamId);
            } else if (arg.writable) {
                stream = new SIOOutputStream(this, streamId,
                    this.maxPendingAck,
                    this.ackTimeoutMs);
            } else {
                throw new Error('can\'t decode stream neither readable ' +
                                'nor writable');
@@ -360,14 +360,14 @@ class SIOStreamSocket {
            if (arg.readable) {
                stream.once('close', () => {
                    log.debug('stream closed, removing from remote streams',
                        { streamId });
                    delete this.remoteStreams[streamId];
                });
            }
            if (arg.writable) {
                stream.once('finish', () => {
                    log.debug('stream finished, removing from remote streams',
                        { streamId });
                    delete this.remoteStreams[streamId];
                });
            }
@@ -399,7 +399,7 @@ class SIOStreamSocket {

    _write(streamId, data, cb) {
        this.logger.debug('emit \'stream-data\' event',
            { streamId, size: data.length });
        this.socket.emit('stream-data', { streamId, data }, cb);
    }

@@ -1,9 +1,10 @@
'use strict'; // eslint-disable-line strict

const Ajv = require('ajv');
const userPolicySchema = require('./userPolicySchema');
const resourcePolicySchema = require('./resourcePolicySchema');
const errors = require('../errors');
import Ajv from 'ajv';
import * as userPolicySchema from './userPolicySchema.json';
import * as resourcePolicySchema from './resourcePolicySchema.json';
import errors from '../errors';
import type { ArsenalError } from '../errors';

const ajValidate = new Ajv({ allErrors: true });
ajValidate.addMetaSchema(require('ajv/lib/refs/json-schema-draft-06.json'));
@@ -27,7 +28,7 @@ const errDict = {
};

// parse ajv errors and return early with the first relevant error
function _parseErrors(ajvErrors, policyType) {
function _parseErrors(ajvErrors: Ajv.ErrorObject[], policyType: string) {
    let parsedErr;
    if (policyType === 'user') {
        // deep copy is needed as we have to assign custom error description
@@ -67,7 +68,7 @@ function _parseErrors(ajvErrors, policyType) {
}

// parse JSON safely without throwing an exception
function _safeJSONParse(s) {
function _safeJSONParse(s: string): object {
    try {
        return JSON.parse(s);
    } catch (e) {
@@ -75,9 +76,20 @@ function _safeJSONParse(s) {
    }
}

/**
 * @typedef ValidationResult
 * @type Object
 * @property {Array|null} error - list of validation errors or null
 * @property {Bool} valid - true/false depending on the validation result
 */
interface ValidationResult {
    error: ArsenalError;
    valid: boolean;
}

// validates policy using the validation schema
function _validatePolicy(type, policy) {
    if (type === 'user') {
function _validatePolicy(policyType: string, policy: string): ValidationResult {
    if (policyType === 'user') {
        const parseRes = _safeJSONParse(policy);
        if (parseRes instanceof Error) {
            return { error: Object.assign({}, errors.MalformedPolicyDocument),
@@ -90,7 +102,7 @@ function _validatePolicy(type, policy) {
        }
        return { error: null, valid: true };
    }
    if (type === 'resource') {
    if (policyType === 'resource') {
        const parseRes = _safeJSONParse(policy);
        if (parseRes instanceof Error) {
            return { error: Object.assign({}, errors.MalformedPolicy),
@@ -105,19 +117,14 @@ function _validatePolicy(type, policy) {
    }
    return { error: errors.NotImplemented, valid: false };
}
/**
 * @typedef ValidationResult
 * @type Object
 * @property {Array|null} error - list of validation errors or null
 * @property {Bool} valid - true/false depending on the validation result
 */

/**
 * Validates user policy
 * @param {String} policy - policy json
 * @returns {Object} - returns object with properties error and value
 * @returns {ValidationResult} - result of the validation
 */
function validateUserPolicy(policy) {
function validateUserPolicy(policy: string): ValidationResult {
    return _validatePolicy('user', policy);
}

@@ -127,11 +134,11 @@ function validateUserPolicy(policy) {
 * @returns {Object} - returns object with properties error and value
 * @returns {ValidationResult} - result of the validation
 */
function validateResourcePolicy(policy) {
function validateResourcePolicy(policy: string): ValidationResult {
    return _validatePolicy('resource', policy);
}

module.exports = {
export {
    validateUserPolicy,
    validateResourcePolicy,
};
@@ -50,7 +50,7 @@ evaluators.isResourceApplicable = (requestContext, statementResource, log) => {
            requestResourceArr, true);
        if (arnSegmentsMatch) {
            log.trace('policy resource is applicable to request',
                { requestResource: resource, policyResource });
            return true;
        }
        continue;
@@ -224,21 +224,21 @@ evaluators.evaluatePolicy = (requestContext, policy, log) => {
        // in policy, move on to next statement
        if (currentStatement.NotResource &&
            evaluators.isResourceApplicable(requestContext,
                currentStatement.NotResource, log)) {
            continue;
        }
        // If affirmative action is in policy and request action is not
        // applicable, move on to next statement
        if (currentStatement.Action &&
            !evaluators.isActionApplicable(requestContext.getAction(),
                currentStatement.Action, log)) {
            continue;
        }
        // If NotAction is in policy and action matches NotAction in policy,
        // move on to next statement
        if (currentStatement.NotAction &&
            evaluators.isActionApplicable(requestContext.getAction(),
                currentStatement.NotAction, log)) {
            continue;
        }
        const conditionEval = currentStatement.Condition ?
@@ -39,11 +39,11 @@ conditions.findConditionKey = (key, requestContext) => {
    // (see Boolean Condition Operators).
    // Note: This key is only present if MFA was used. So, the following
    // will not work:
    // "Condition" :
    // { "Bool" : { "aws:MultiFactorAuthPresent" : false } }
    // Instead use:
    // "Condition" :
    // { "Null" : { "aws:MultiFactorAuthPresent" : true } }
    map.set('aws:MultiFactorAuthPresent',
        requestContext.getMultiFactorAuthPresent());
    // aws:MultiFactorAuthAge – Used to check how many seconds since
@@ -166,8 +166,8 @@ conditions.findConditionKey = (key, requestContext) => {
    // so evaluation should be skipped
    map.set('s3:RequestObjectTagKeys',
        requestContext.getNeedTagEval() && requestContext.getRequestObjTags()
            ? getTagKeys(requestContext.getRequestObjTags())
            : undefined);
    return map.get(key);
};

@@ -191,7 +191,7 @@ function convertSpecialChars(string) {
        return map[char];
    }
    return string.replace(/(\$\{\*\})|(\$\{\?\})|(\$\{\$\})/g,
        characterMap);
}

/**
@@ -425,10 +425,10 @@ conditions.convertConditionOperator = operator => {
            return !operatorMap.ArnLike(key, value);
        },
        Null: function nullOperator(key, value) {
            // Null is used to check if a condition key is present.
            // The policy statement value should be either true (the key doesn't
            // exist — it is null) or false (the key exists and its value is
            // not null).
            if ((key === undefined || key === null)
                && value[0] === 'true' ||
                (key !== undefined && key !== null)
@@ -51,10 +51,10 @@ wildcards.handleWildcardInResource = arn => {
    // Wildcards can be part of the resource ARN.
    // Wildcards do NOT span segments of the ARN (separated by ":")

    // Example: all elements in specific bucket:
    // "Resource": "arn:aws:s3:::my_corporate_bucket/*"
    // ARN format:
    // arn:partition:service:region:namespace:relative-id
    const arnArr = arn.split(':');
    return arnArr.map(portion => wildcards.handleWildcards(portion));
};
@@ -6,7 +6,6 @@ const crypto = require('crypto');
 * data through a stream
 */
class MD5Sum extends Transform {

    /**
     * @constructor
     */
@@ -40,7 +39,6 @@ class MD5Sum extends Transform {
        this.emit('hashed');
        callback(null);
    }

}

module.exports = MD5Sum;
@@ -73,7 +73,7 @@ class ResultsCollector extends EventEmitter {
 * @property {Error} [results[].error] - error returned by Azure putting subpart
 * @property {number} results[].subPartIndex - index of the subpart
 */
/**
 * "error" event
 * @event ResultCollector#error
 * @type {(Error|undefined)} error - error returned by Azure last subpart
@@ -94,7 +94,7 @@ azureMpuUtils.getSubPartIds = (part, uploadId) =>
    azureMpuUtils.getBlockId(uploadId, part.partNumber, subPartIndex));

azureMpuUtils.putSinglePart = (errorWrapperFn, request, params, dataStoreName,
    log, cb) => {
    const { bucketName, partNumber, size, objectKey, contentMD5, uploadId }
        = params;
    const blockId = azureMpuUtils.getBlockId(uploadId, partNumber, 0);
@@ -107,31 +107,31 @@ log, cb) => {
    request.pipe(passThrough);
    return errorWrapperFn('uploadPart', 'createBlockFromStream',
        [blockId, bucketName, objectKey, passThrough, size, options,
            (err, result) => {
                if (err) {
                    log.error('Error from Azure data backend uploadPart',
                        { error: err.message, dataStoreName });
                    if (err.code === 'ContainerNotFound') {
                        return cb(errors.NoSuchBucket);
                    }
                    if (err.code === 'InvalidMd5') {
                        return cb(errors.InvalidDigest);
                    }
                    if (err.code === 'Md5Mismatch') {
                        return cb(errors.BadDigest);
                    }
                    return cb(errors.InternalError.customizeDescription(
                        `Error returned from Azure: ${err.message}`),
                    );
                }
                const md5 = result.headers['content-md5'] || '';
                const eTag = objectUtils.getHexMD5(md5);
                return cb(null, eTag, size);
            }], log, cb);
};

azureMpuUtils.putNextSubPart = (errorWrapperFn, partParams, subPartInfo,
    subPartStream, subPartIndex, resultsCollector, log, cb) => {
    const { uploadId, partNumber, bucketName, objectKey } = partParams;
    const subPartSize = azureMpuUtils.getSubPartSize(
        subPartInfo, subPartIndex);
@@ -140,11 +140,11 @@ subPartStream, subPartIndex, resultsCollector, log, cb) => {
    resultsCollector.pushOp();
    errorWrapperFn('uploadPart', 'createBlockFromStream',
        [subPartId, bucketName, objectKey, subPartStream, subPartSize,
            {}, err => resultsCollector.pushResult(err, subPartIndex)], log, cb);
};

azureMpuUtils.putSubParts = (errorWrapperFn, request, params,
    dataStoreName, log, cb) => {
    const subPartInfo = azureMpuUtils.getSubPartInfo(params.size);
    const resultsCollector = new ResultsCollector();
    const hashedStream = new MD5Sum();
@@ -31,9 +31,9 @@ convertMethods.listMultipartUploads = xmlParams => {
    const l = xmlParams.list;

    xml.push('<?xml version="1.0" encoding="UTF-8"?>',
        '<ListMultipartUploadsResult ' +
            'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">',
        `<Bucket>${escapeForXml(xmlParams.bucketName)}</Bucket>`
        `<Bucket>${escapeForXml(xmlParams.bucketName)}</Bucket>`,
    );

    // For certain XML elements, if it is `undefined`, AWS returns either an
@@ -58,7 +58,7 @@ convertMethods.listMultipartUploads = xmlParams => {
    });

    xml.push(`<MaxUploads>${escapeForXml(l.MaxKeys)}</MaxUploads>`,
        `<IsTruncated>${escapeForXml(l.IsTruncated)}</IsTruncated>`
        `<IsTruncated>${escapeForXml(l.IsTruncated)}</IsTruncated>`,
    );

    l.Uploads.forEach(upload => {
@@ -69,29 +69,29 @@ convertMethods.listMultipartUploads = xmlParams => {
        }

        xml.push('<Upload>',
            `<Key>${escapeForXml(key)}</Key>`,
            `<UploadId>${escapeForXml(val.UploadId)}</UploadId>`,
            '<Initiator>',
            `<ID>${escapeForXml(val.Initiator.ID)}</ID>`,
            `<DisplayName>${escapeForXml(val.Initiator.DisplayName)}` +
                '</DisplayName>',
            '</Initiator>',
            '<Owner>',
            `<ID>${escapeForXml(val.Owner.ID)}</ID>`,
            `<DisplayName>${escapeForXml(val.Owner.DisplayName)}` +
                '</DisplayName>',
            '</Owner>',
            `<StorageClass>${escapeForXml(val.StorageClass)}` +
                '</StorageClass>',
            `<Initiated>${escapeForXml(val.Initiated)}</Initiated>`,
            '</Upload>'
            '</Upload>',
        );
    });

    l.CommonPrefixes.forEach(prefix => {
        xml.push('<CommonPrefixes>',
            `<Prefix>${escapeForXml(prefix)}</Prefix>`,
            '</CommonPrefixes>'
            '</CommonPrefixes>',
        );
    });

@@ -5,7 +5,6 @@ const Readable = require('stream').Readable;
 * This class is used to produce zeros filled buffers for a reader consumption
 */
class NullStream extends Readable {

    /**
     * Construct a new zeros filled buffers producer that will
     * produce as much bytes as specified by the range parameter, or the size
@@ -32,8 +31,8 @@ class NullStream extends Readable {
    _read(size) {
        const toRead = Math.min(size, this.bytesToRead);
        const buffer = toRead > 0
            ? Buffer.alloc(toRead, 0)
            : null;
        this.bytesToRead -= toRead;
        this.push(buffer);
    }
@@ -110,7 +110,7 @@ function generateMpuPartStorageInfo(filteredPartList) {
 * and extraPartLocations
 */
function validateAndFilterMpuParts(storedParts, jsonList, mpuOverviewKey,
    splitter, log) {
    let storedPartsCopy = [];
    const filteredPartsObj = {};
    filteredPartsObj.partList = [];
@@ -4,11 +4,11 @@ const errors = require('../errors');
const escapeForXml = require('./escapeForXml');

const errorInvalidArgument = errors.InvalidArgument
    .customizeDescription('The header \'x-amz-tagging\' shall be ' +
    'encoded as UTF-8 then URLEncoded URL query parameters without ' +
    'tag name duplicates.');
const errorBadRequestLimit50 = errors.BadRequest
    .customizeDescription('Object tags cannot be greater than 50');

/*
    Format of xml request:
@@ -38,7 +38,7 @@ const _validator = {
            result.Tagging.TagSet &&
            result.Tagging.TagSet.length === 1 &&
            (
                result.Tagging.TagSet[0] === '' ||
                result.Tagging.TagSet[0] &&
                Object.keys(result.Tagging.TagSet[0]).length === 1 &&
                result.Tagging.TagSet[0].Tag &&
@@ -155,7 +155,7 @@ function parseTagXml(xml, log, cb) {
function convertToXml(objectTags) {
    const xml = [];
    xml.push('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>',
        '<Tagging> <TagSet>');
    if (objectTags && Object.keys(objectTags).length > 0) {
        Object.keys(objectTags).forEach(key => {
            xml.push(`<Tag><Key>${escapeForXml(key)}</Key>` +
@@ -42,7 +42,7 @@ function checkBucketAndKey(bucketName, objectKey, method, reqQuery,
        log.debug('empty bucket name', { method: 'routes' });
        return (method !== 'OPTIONS') ?
            errors.MethodNotAllowed : errors.AccessForbidden
                .customizeDescription('CORSResponse: Bucket not found');
    }
    if (bucketName !== undefined && routesUtils.isValidBucketName(bucketName,
        blacklistedPrefixes.bucket) === false) {
@@ -90,7 +90,7 @@ function checkTypes(req, res, params, logger) {
        'bad routes param: internalHandlers must be an object');
    if (params.statsClient) {
        assert.strictEqual(typeof params.statsClient, 'object',
            'bad routes param: statsClient must be an object');
    }
    assert(Array.isArray(params.allEndpoints),
        'bad routes param: allEndpoints must be an array');
@@ -98,13 +98,13 @@ function checkTypes(req, res, params, logger) {
        'bad routes param: allEndpoints must have at least one endpoint');
    params.allEndpoints.forEach(endpoint => {
        assert.strictEqual(typeof endpoint, 'string',
            'bad routes param: each item in allEndpoints must be a string');
    });
    assert(Array.isArray(params.websiteEndpoints),
        'bad routes param: allEndpoints must be an array');
    params.websiteEndpoints.forEach(endpoint => {
        assert.strictEqual(typeof endpoint, 'string',
            'bad routes param: each item in websiteEndpoints must be a string');
    });
    assert.strictEqual(typeof params.blacklistedPrefixes, 'object',
        'bad routes param: blacklistedPrefixes must be an object');
@@ -112,13 +112,13 @@ function checkTypes(req, res, params, logger) {
        'bad routes param: blacklistedPrefixes.bucket must be an array');
    params.blacklistedPrefixes.bucket.forEach(pre => {
        assert.strictEqual(typeof pre, 'string',
            'bad routes param: each blacklisted bucket prefix must be a string');
    });
    assert(Array.isArray(params.blacklistedPrefixes.object),
        'bad routes param: blacklistedPrefixes.object must be an array');
    params.blacklistedPrefixes.object.forEach(pre => {
        assert.strictEqual(typeof pre, 'string',
            'bad routes param: each blacklisted object prefix must be a string');
    });
    assert.strictEqual(typeof params.dataRetrievalParams, 'object',
        'bad routes param: dataRetrievalParams must be a defined object');
@@ -173,8 +173,8 @@ function routes(req, res, params, logger) {
        reqUids = undefined;
    }
    const log = (reqUids !== undefined ?
        logger.newRequestLoggerFromSerializedUids(reqUids) :
        logger.newRequestLogger());

    if (!req.url.startsWith('/_/healthcheck') &&
        !req.url.startsWith('/_/report')) {
@@ -210,7 +210,7 @@ function routes(req, res, params, logger) {
        return routesUtils.responseXMLBody(
            errors.InvalidURI.customizeDescription('Could not parse the ' +
                'specified URI. Check your restEndpoints configuration.'),
            undefined, res, log);
    }

    log.addDefaultFields({
@@ -232,7 +232,7 @@ function routes(req, res, params, logger) {

    if (bucketOrKeyError) {
        log.trace('error with bucket or key value',
            { error: bucketOrKeyError });
        return routesUtils.responseXMLBody(bucketOrKeyError, null, res, log);
    }

@@ -7,7 +7,7 @@ function routeDELETE(request, response, api, log, statsClient) {
    if (request.query.uploadId) {
        if (request.objectKey === undefined) {
            return routesUtils.responseNoBody(
                errors.InvalidRequest.customizeDescription('A key must be ' +
                    'specified'), null, response, 200, log);
        }
        api.callApiMethod('multipartDelete', request, response, log,
@@ -19,77 +19,77 @@ function routeDELETE(request, response, api, log, statsClient) {
    } else if (request.objectKey === undefined) {
        if (request.query.website !== undefined) {
            return api.callApiMethod('bucketDeleteWebsite', request,
                response, log, (err, corsHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseNoBody(err, corsHeaders,
                        response, 204, log);
                });
        } else if (request.query.cors !== undefined) {
            return api.callApiMethod('bucketDeleteCors', request, response,
                log, (err, corsHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseNoBody(err, corsHeaders,
                        response, 204, log);
                });
        } else if (request.query.replication !== undefined) {
            return api.callApiMethod('bucketDeleteReplication', request,
                response, log, (err, corsHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseNoBody(err, corsHeaders,
                        response, 204, log);
                });
        } else if (request.query.lifecycle !== undefined) {
            return api.callApiMethod('bucketDeleteLifecycle', request,
                response, log, (err, corsHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseNoBody(err, corsHeaders,
                        response, 204, log);
                });
        } else if (request.query.policy !== undefined) {
            return api.callApiMethod('bucketDeletePolicy', request,
                response, log, (err, corsHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseNoBody(err, corsHeaders,
                        response, 204, log);
                });
        } else if (request.query.encryption !== undefined) {
            return api.callApiMethod('bucketDeleteEncryption', request,
                response, log, (err, corsHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseNoBody(err, corsHeaders,
                        response, 204, log);
                });
        }
        api.callApiMethod('bucketDelete', request, response, log,
            (err, corsHeaders) => {
                routesUtils.statsReport500(err, statsClient);
                return routesUtils.responseNoBody(err, corsHeaders, response,
                    204, log);
            });
    } else {
        if (request.query.tagging !== undefined) {
            return api.callApiMethod('objectDeleteTagging', request,
                response, log, (err, resHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseNoBody(err, resHeaders,
                        response, 204, log);
                });
        }
        api.callApiMethod('objectDelete', request, response, log,
            (err, corsHeaders) => {
                /*
                 * Since AWS expects a 204 regardless of the existence of
                 * the object, the errors NoSuchKey and NoSuchVersion should not
                 * be sent back as a response.
                 */
                if (err && !err.NoSuchKey && !err.NoSuchVersion) {
                    return routesUtils.responseNoBody(err, corsHeaders,
                        response, null, log);
                }
                routesUtils.statsReport500(err, statsClient);
                return routesUtils.responseNoBody(null, corsHeaders, response,
                    204, log);
            });
    }
    return undefined;
}
@@ -2,7 +2,7 @@ const errors = require('../../errors');
const routesUtils = require('../routesUtils');

function routerGET(request, response, api, log, statsClient,
    dataRetrievalParams) {
    log.debug('routing request', { method: 'routerGET' });
    if (request.bucketName === undefined && request.objectKey !== undefined) {
        routesUtils.responseXMLBody(errors.NoSuchBucket, null, response, log);
@@ -17,18 +17,18 @@ dataRetrievalParams) {
        // GET bucket ACL
        if (request.query.acl !== undefined) {
            api.callApiMethod('bucketGetACL', request, response, log,
                (err, xml, corsHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseXMLBody(err, xml, response, log,
                        corsHeaders);
                });
        } else if (request.query.replication !== undefined) {
            api.callApiMethod('bucketGetReplication', request, response, log,
                (err, xml, corsHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseXMLBody(err, xml, response, log,
                        corsHeaders);
                });
        } else if (request.query.cors !== undefined) {
            api.callApiMethod('bucketGetCors', request, response, log,
                (err, xml, corsHeaders) => {
@@ -70,7 +70,7 @@ dataRetrievalParams) {
                (err, xml, corsHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseXMLBody(err, xml, response, log,
                        corsHeaders);
                });
        } else if (request.query.policy !== undefined) {
            api.callApiMethod('bucketGetPolicy', request, response, log,
@@ -95,11 +95,11 @@ dataRetrievalParams) {
                });
        } else if (request.query.encryption !== undefined) {
            api.callApiMethod('bucketGetEncryption', request, response, log,
                (err, xml, corsHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseXMLBody(err, xml, response,
                        log, corsHeaders);
                });
        } else {
            // GET bucket
            api.callApiMethod('bucketGet', request, response, log,
@@ -21,11 +21,11 @@ function routeOPTIONS(request, response, api, log, statsClient) {
    }

    return api.callApiMethod('corsPreflight', request, response, log,
        (err, resHeaders) => {
            routesUtils.statsReport500(err, statsClient);
            return routesUtils.responseNoBody(err, resHeaders, response, 200,
                log);
        });
}

module.exports = routeOPTIONS;
@@ -27,28 +27,28 @@ function routePOST(request, response, api, log) {
    if (request.query.uploads !== undefined) {
        return api.callApiMethod('initiateMultipartUpload', request,
            response, log, (err, result, corsHeaders) =>
                routesUtils.responseXMLBody(err, result, response, log,
                    corsHeaders));
    }

    // POST complete multipart upload
    if (request.query.uploadId !== undefined) {
        return api.callApiMethod('completeMultipartUpload', request,
            response, log, (err, result, resHeaders) =>
                routesUtils.responseXMLBody(err, result, response, log,
                    resHeaders));
    }

    // POST multiObjectDelete
    if (request.query.delete !== undefined) {
        return api.callApiMethod('multiObjectDelete', request, response,
            log, (err, xml, corsHeaders) =>
                routesUtils.responseXMLBody(err, xml, response, log,
                    corsHeaders));
    }

    return routesUtils.responseNoBody(errors.NotImplemented, null, response,
        200, log);
}
/* eslint-enable no-param-reassign */
module.exports = routePOST;
@@ -14,16 +14,16 @@ function routePUT(request, response, api, log, statsClient) {
        || contentLength < 0)) || contentLength === '') {
        log.debug('invalid content-length header');
        return routesUtils.responseNoBody(
            errors.BadRequest, null, response, null, log);
    }
    // PUT bucket ACL
    if (request.query.acl !== undefined) {
        api.callApiMethod('bucketPutACL', request, response, log,
            (err, corsHeaders) => {
                routesUtils.statsReport500(err, statsClient);
                return routesUtils.responseNoBody(err, corsHeaders,
                    response, 200, log);
            });
    } else if (request.query.versioning !== undefined) {
        api.callApiMethod('bucketPutVersioning', request, response, log,
            (err, corsHeaders) => {
@@ -82,11 +82,11 @@ function routePUT(request, response, api, log, statsClient) {
            });
    } else if (request.query.encryption !== undefined) {
        api.callApiMethod('bucketPutEncryption', request, response, log,
            (err, corsHeaders) => {
                routesUtils.statsReport500(err, statsClient);
                return routesUtils.responseNoBody(err, corsHeaders,
                    response, 200, log);
            });
    } else {
        // PUT bucket
        return api.callApiMethod('bucketPut', request, response, log,
@@ -110,7 +110,7 @@ function routePUT(request, response, api, log, statsClient) {
                method: 'routePUT',
            });
            return routesUtils
                .responseNoBody(errors.InvalidDigest, null, response, 200, log);
        }
        if (request.headers['content-md5']) {
            request.contentMD5 = request.headers['content-md5'];
@@ -126,17 +126,17 @@ function routePUT(request, response, api, log, statsClient) {
            });
            return routesUtils
                .responseNoBody(errors.InvalidDigest, null, response, 200,
                    log);
        }
    }
    if (request.query.partNumber) {
        if (request.headers['x-amz-copy-source']) {
            api.callApiMethod('objectPutCopyPart', request, response, log,
                (err, xml, additionalHeaders) => {
                    routesUtils.statsReport500(err, statsClient);
                    return routesUtils.responseXMLBody(err, xml, response, log,
                        additionalHeaders);
                });
        } else {
            api.callApiMethod('objectPutPart', request, response, log,
                (err, calculatedHash, corsHeaders) => {
@@ -202,11 +202,11 @@ function routePUT(request, response, api, log, statsClient) {
            contentLength: request.parsedContentLength,
        });
        api.callApiMethod('objectPut', request, response, log,
            (err, resHeaders) => {
                routesUtils.statsReport500(err, statsClient);
                return routesUtils.responseNoBody(err, resHeaders,
                    response, 200, log);
            });
        }
    }
    return undefined;
@@ -14,7 +14,7 @@ function routerWebsite(request, response, api, log, statsClient,
    if (request.method === 'GET') {
        return api.callApiMethod('websiteGet', request, response, log,
            (err, userErrorPageFailure, dataGetInfo, resMetaHeaders,
                redirectInfo, key) => {
                routesUtils.statsReport500(err, statsClient);
                // request being redirected
                if (redirectInfo) {
@@ -43,21 +43,21 @@ function routerWebsite(request, response, api, log, statsClient,
    }
    if (request.method === 'HEAD') {
        return api.callApiMethod('websiteHead', request, response, log,
            (err, resMetaHeaders, redirectInfo, key) => {
                routesUtils.statsReport500(err, statsClient);
                if (redirectInfo) {
                    return routesUtils.redirectRequest(redirectInfo,
                        key, request.connection.encrypted,
                        response, request.headers.host, resMetaHeaders, log);
                }
                // could redirect on err so check for redirectInfo first
                if (err) {
                    return routesUtils.errorHeaderResponse(err, response,
                        resMetaHeaders, log);
                }
                return routesUtils.responseContentHeaders(err, {}, resMetaHeaders,
                    response, log);
            });
    }
    return undefined;
}
@@ -28,7 +28,7 @@ function setCommonResponseHeaders(headers, response, log) {
            } catch (e) {
                log.debug('header can not be added ' +
                    'to the response', { header: headers[key],
                        error: e.stack, method: 'setCommonResponseHeaders' });
            }
        }
    });
@@ -71,7 +71,7 @@ const XMLResponseBackend = {
     * @return {object} response - response object with additional headers
     */
    okResponse: function okXMLResponse(xml, response, log,
        additionalHeaders) {
        const bytesSent = Buffer.byteLength(xml);
        log.trace('sending success xml response');
        log.addDefaultFields({
@@ -118,7 +118,7 @@ const XMLResponseBackend = {
            `<Message>${errCode.description}</Message>`,
            '<Resource></Resource>',
            `<RequestId>${log.getSerializedUids()}</RequestId>`,
            '</Error>'
            '</Error>',
        );
        const xmlStr = xml.join('');
        const bytesSent = Buffer.byteLength(xmlStr);
@@ -148,7 +148,7 @@ const JSONResponseBackend = {
     * @return {object} response - response object with additional headers
     */
    okResponse: function okJSONResponse(json, response, log,
        additionalHeaders) {
        const bytesSent = Buffer.byteLength(json);
        log.trace('sending success json response');
        log.addDefaultFields({
@@ -166,7 +166,7 @@ const JSONResponseBackend = {
    },

    errorResponse: function errorJSONResponse(errCode, response, log,
        corsHeaders) {
        log.trace('sending error json response', { errCode });
        /*
        {
@@ -369,27 +369,27 @@ function retrieveData(locations, retrieveDataParams, response, log) {
        currentStream = readable;
        return readable.pipe(response, { end: false });
    }), err => {
        currentStream = null;
        if (err) {
            log.debug('abort response due to error', {
                error: err.code, errMsg: err.message });
        }
        // call end for all cases (error/success) per node.js docs
        // recommendation
        response.end();
    },
    );
}

function _responseBody(responseBackend, errCode, payload, response, log,
    additionalHeaders) {
    if (errCode && !response.headersSent) {
        return responseBackend.errorResponse(errCode, response, log,
            additionalHeaders);
    }
    if (!response.headersSent) {
        return responseBackend.okResponse(payload, response, log,
            additionalHeaders);
    }
    return undefined;
}
@@ -398,8 +398,8 @@ function _computeContentLengthFromLocation(dataLocations) {
    return dataLocations.reduce(
        (sum, location) => (sum !== undefined &&
        (typeof location.size === 'number' || typeof location.size === 'string') ?
            sum + Number.parseInt(location.size, 10) :
            undefined), 0);
}

function _contentLengthMatchesLocations(contentLength, dataLocations) {
@@ -420,7 +420,7 @@ const routesUtils = {
     */
    responseXMLBody(errCode, xml, response, log, additionalHeaders) {
        return _responseBody(XMLResponseBackend, errCode, xml, response,
            log, additionalHeaders);
    },

    /**
@@ -434,7 +434,7 @@ const routesUtils = {
     */
    responseJSONBody(errCode, json, response, log, additionalHeaders) {
        return _responseBody(JSONResponseBackend, errCode, json, response,
            log, additionalHeaders);
    },

    /**
@@ -449,7 +449,7 @@ const routesUtils = {
    responseNoBody(errCode, resHeaders, response, httpCode = 200, log) {
        if (errCode && !response.headersSent) {
            return XMLResponseBackend.errorResponse(errCode, response, log,
                resHeaders);
        }
        if (!response.headersSent) {
            return okHeaderResponse(resHeaders, response, httpCode, log);
@@ -467,10 +467,10 @@ const routesUtils = {
     * @return {object} - router's response object
     */
    responseContentHeaders(errCode, overrideParams, resHeaders, response,
        log) {
        if (errCode && !response.headersSent) {
            return XMLResponseBackend.errorResponse(errCode, response, log,
                resHeaders);
        }
        if (!response.headersSent) {
            // Undefined added as an argument since need to send range to
@@ -505,7 +505,7 @@ const routesUtils = {
        retrieveDataParams, response, range, log) {
        if (errCode && !response.headersSent) {
            return XMLResponseBackend.errorResponse(errCode, response, log,
                resHeaders);
        }
        if (dataLocations !== null && !response.headersSent) {
            // sanity check of content length against individual data
@@ -513,13 +513,13 @@ const routesUtils = {
            const contentLength = resHeaders && resHeaders['Content-Length'];
            if (contentLength !== undefined &&
                !_contentLengthMatchesLocations(contentLength,
                    dataLocations)) {
                log.error('logic error: total length of fetched data ' +
                    'locations does not match returned content-length',
                    { contentLength, dataLocations });
                return XMLResponseBackend.errorResponse(errors.InternalError,
                    response, log,
                    resHeaders);
            }
        }
        if (!response.headersSent) {
@@ -592,7 +592,7 @@ const routesUtils = {
            `<h1>${err.code} ${response.statusMessage}</h1>`,
            '<ul>',
            `<li>Code: ${err.message}</li>`,
            `<li>Message: ${err.description}</li>`
            `<li>Message: ${err.description}</li>`,
        );

        if (!userErrorPageFailure && bucketName) {
@@ -602,7 +602,7 @@ const routesUtils = {
            `<li>RequestId: ${log.getSerializedUids()}</li>`,
            // AWS response contains HostId here.
            // TODO: consider adding
            '</ul>'
            '</ul>',
        );
        if (userErrorPageFailure) {
            html.push(
@@ -612,13 +612,13 @@ const routesUtils = {
                '<ul>',
                `<li>Code: ${err.message}</li>`,
                `<li>Message: ${err.description}</li>`,
                '</ul>'
                '</ul>',
            );
        }
        html.push(
            '<hr/>',
            '</body>',
            '</html>'
            '</html>',
        );

        return response.end(html.join(''), 'utf8', () => {
@@ -840,7 +840,7 @@ const routesUtils = {
                    // most specific potential hostname
                    bucketName =
                        potentialBucketName.length < bucketName.length ?
                            potentialBucketName : bucketName;
                }
            }
        }
@@ -848,7 +848,7 @@ const routesUtils = {
            return bucketName;
        }
        throw new Error(
            `bad request: hostname ${host} is not in valid endpoints`
            `bad request: hostname ${host} is not in valid endpoints`,
        );
    },

@@ -1,6 +1,6 @@
'use strict'; // eslint-disable-line strict

const randomBytes = require('crypto').randomBytes;
import { randomBytes } from 'crypto';

/*
 * This set of function allows us to create an efficient shuffle
@@ -18,13 +18,13 @@ const randomBytes = require('crypto').randomBytes;
 * @return {number} the lowest number of bits
 * @throws Error if number < 0
 */
function bitsNeeded(number) {
    if (number < 0) {
function bitsNeeded(num: number): number {
    if (num < 0) {
        throw new Error('Input must be greater than or equal to zero');
    } else if (number === 0) {
    } else if (num === 0) {
        return 1;
    } else {
        return Math.floor(Math.log2(number)) + 1;
        return Math.floor(Math.log2(num)) + 1;
    }
}

@@ -36,7 +36,7 @@ function bitsNeeded(number) {
 * if numbits === 0
 * @throws Error if numBits < 0
 */
function createMaskOnes(numBits) {
function createMaskOnes(numBits: number): number {
    if (numBits < 0) {
        throw new Error('Input must be greater than or equal to zero');
    }
@@ -50,7 +50,7 @@ function createMaskOnes(numBits) {
 * @return {buffer} a InRangebuffer with 'howMany' pseudo-random bytes.
 * @throws Error if numBytes < 0 or if insufficient entropy
 */
function nextBytes(numBytes) {
function nextBytes(numBytes: number): Buffer {
    if (numBytes < 0) {
        throw new Error('Input must be greater than or equal to zero');
    }
@@ -67,7 +67,7 @@ function nextBytes(numBytes) {
 * @return {number} the number of bytes needed
 * @throws Error if numBits < 0
 */
function bitsToBytes(numBits) {
function bitsToBytes(numBits: number): number {
    if (numBits < 0) {
        throw new Error('Input must be greater than or equal to zero');
    }
@@ -83,7 +83,7 @@ function bitsToBytes(numBits) {
 * @return {number} - a pseudo-random integer in [min,max], undefined if
 * min >= max
 */
function randomRange(min, max) {
function randomRange(min: number, max: number): number {
    if (max < min) {
        throw new Error('Invalid range');
    }
@@ -98,7 +98,7 @@ function randomRange(min, max) {
    // we use a mask as an optimization: it increases the chances for the
    // candidate to be in range
    const mask = createMaskOnes(bits);
    let candidate;
    let candidate: number;
    do {
        candidate = parseInt(nextBytes(bytes).toString('hex'), 16) & mask;
    } while (candidate > range);
@@ -111,7 +111,7 @@ function randomRange(min, max) {
 * @param {Array} array - Any type of array
 * @return {Array} - The sorted array
 */
module.exports = function shuffle(array) {
export default function shuffle<T>(array: T[]): T[] {
    for (let i = array.length - 1; i > 0; i--) {
        const randIndex = randomRange(0, i);
        /* eslint-disable no-param-reassign */
@ -19,7 +19,7 @@ const externalVersioningErrorMessage = 'We do not currently support putting ' +
|
|||
|
||||
class DataWrapper {
|
||||
constructor(client, implName, config, kms, metadata, locStorageCheckFn,
|
||||
vault) {
|
||||
vault) {
|
||||
this.client = client;
|
||||
this.implName = implName;
|
||||
this.config = config;
|
||||
|
@ -36,31 +36,31 @@ class DataWrapper {
|
|||
return cb(err);
|
||||
}
|
||||
return this._put(cipherBundle, value, valueSize, keyContext,
|
||||
backendInfo, log, (err, dataRetrievalInfo, hashedStream) => {
|
||||
if (err) {
|
||||
backendInfo, log, (err, dataRetrievalInfo, hashedStream) => {
|
||||
if (err) {
|
||||
// if error putting object, counter should be decremented
|
||||
return this.locStorageCheckFn(location, -valueSize, log,
|
||||
error => {
|
||||
if (error) {
|
||||
log.error('Error decrementing location metric ' +
|
||||
return this.locStorageCheckFn(location, -valueSize, log,
|
||||
error => {
|
||||
if (error) {
|
||||
log.error('Error decrementing location metric ' +
|
||||
'following object PUT failure',
|
||||
{ error: error.message });
|
||||
}
|
||||
return cb(err);
|
||||
});
|
||||
}
|
||||
if (hashedStream) {
|
||||
if (hashedStream.completedHash) {
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
{ error: error.message });
|
||||
}
|
||||
return cb(err);
|
||||
});
|
||||
}
|
||||
hashedStream.on('hashed', () => {
|
||||
hashedStream.removeAllListeners('hashed');
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
});
|
||||
return undefined;
|
||||
}
|
||||
return cb(null, dataRetrievalInfo);
|
||||
});
|
||||
if (hashedStream) {
|
||||
if (hashedStream.completedHash) {
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
}
|
||||
hashedStream.on('hashed', () => {
|
||||
hashedStream.removeAllListeners('hashed');
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
});
|
||||
return undefined;
|
||||
}
|
||||
return cb(null, dataRetrievalInfo);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -97,33 +97,33 @@ class DataWrapper {
|
|||
clientGetInfo.response = response;
|
||||
}
|
||||
this.client.get(clientGetInfo, range, log.getSerializedUids(),
|
||||
(err, stream) => {
|
||||
if (err) {
|
||||
log.error('get error from datastore',
|
||||
{ error: err, implName: this.implName });
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
if (objectGetInfo.cipheredDataKey) {
|
||||
const serverSideEncryption = {
|
||||
cryptoScheme: objectGetInfo.cryptoScheme,
|
||||
masterKeyId: objectGetInfo.masterKeyId,
|
||||
cipheredDataKey: Buffer.from(
|
||||
objectGetInfo.cipheredDataKey, 'base64'),
|
||||
};
|
||||
const offset = objectGetInfo.range ? objectGetInfo.range[0] : 0;
|
||||
return this.kms.createDecipherBundle(serverSideEncryption,
|
||||
offset, log, (err, decipherBundle) => {
|
||||
if (err) {
|
||||
log.error('cannot get decipher bundle from kms',
|
||||
{ method: 'data.wrapper.data.get' });
|
||||
return cb(err);
|
||||
}
|
||||
stream.pipe(decipherBundle.decipher);
|
||||
return cb(null, decipherBundle.decipher);
|
||||
});
|
||||
}
|
||||
return cb(null, stream);
|
||||
});
|
||||
(err, stream) => {
|
||||
if (err) {
|
||||
log.error('get error from datastore',
|
||||
{ error: err, implName: this.implName });
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
if (objectGetInfo.cipheredDataKey) {
|
||||
const serverSideEncryption = {
|
||||
cryptoScheme: objectGetInfo.cryptoScheme,
|
||||
masterKeyId: objectGetInfo.masterKeyId,
|
||||
cipheredDataKey: Buffer.from(
|
||||
objectGetInfo.cipheredDataKey, 'base64'),
|
||||
};
|
||||
const offset = objectGetInfo.range ? objectGetInfo.range[0] : 0;
|
||||
return this.kms.createDecipherBundle(serverSideEncryption,
|
||||
offset, log, (err, decipherBundle) => {
|
||||
if (err) {
|
||||
log.error('cannot get decipher bundle from kms',
|
||||
{ method: 'data.wrapper.data.get' });
|
||||
return cb(err);
|
||||
}
|
||||
stream.pipe(decipherBundle.decipher);
|
||||
return cb(null, decipherBundle.decipher);
|
||||
});
|
||||
}
|
||||
return cb(null, stream);
|
||||
});
|
||||
}
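The get() hunk above is an indentation-only change, so the decrypt-on-read path it touches is easy to lose in the noise: when an object's metadata carries a cipheredDataKey, the raw datastore stream is piped through a decipher transform built by the KMS layer, offset by the start of any range read. Below is a hedged TypeScript sketch of that flow; the Kms interface is simplified (the real call also takes a logger) and the type names are illustrative.

import { Readable, Transform } from 'stream';

interface ObjectGetInfo {
    cryptoScheme?: number;
    masterKeyId?: string;
    cipheredDataKey?: string;   // base64-encoded, as stored in metadata
    range?: [number, number];
}

interface DecipherBundle { decipher: Transform }

interface Kms {
    createDecipherBundle(
        sse: { cryptoScheme?: number; masterKeyId?: string; cipheredDataKey: Buffer },
        offset: number,
        cb: (err: Error | null, bundle?: DecipherBundle) => void,
    ): void;
}

function getDecryptedStream(
    kms: Kms,
    objectGetInfo: ObjectGetInfo,
    rawStream: Readable,
    cb: (err: Error | null, stream?: Readable) => void,
) {
    if (!objectGetInfo.cipheredDataKey) {
        // Object was stored unencrypted: hand back the datastore stream as-is.
        return cb(null, rawStream);
    }
    const serverSideEncryption = {
        cryptoScheme: objectGetInfo.cryptoScheme,
        masterKeyId: objectGetInfo.masterKeyId,
        cipheredDataKey: Buffer.from(objectGetInfo.cipheredDataKey, 'base64'),
    };
    // Range reads must start the decipher at the right byte offset.
    const offset = objectGetInfo.range ? objectGetInfo.range[0] : 0;
    return kms.createDecipherBundle(serverSideEncryption, offset, (err, bundle) => {
        if (err || !bundle) {
            return cb(err || new Error('no decipher bundle'));
        }
        rawStream.pipe(bundle.decipher);
        // The caller reads decrypted bytes from the decipher end of the pipe.
        return cb(null, bundle.decipher);
    });
}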
|
||||
|
||||
delete(objectGetInfo, log, cb) {
|
||||
|
@ -153,15 +153,15 @@ class DataWrapper {
|
|||
if (!err) {
|
||||
// pass size as negative so location metric is decremented
|
||||
return this.locStorageCheckFn(objectGetInfo.dataStoreName,
|
||||
-objectGetInfo.size, log, err => {
|
||||
if (err) {
|
||||
log.error('Utapi error pushing location metric', {
|
||||
error: err,
|
||||
key: objectGetInfo.key,
|
||||
method: 'locationStorageCheck' });
|
||||
}
|
||||
return callback(err);
|
||||
});
|
||||
-objectGetInfo.size, log, err => {
|
||||
if (err) {
|
||||
log.error('Utapi error pushing location metric', {
|
||||
error: err,
|
||||
key: objectGetInfo.key,
|
||||
method: 'locationStorageCheck' });
|
||||
}
|
||||
return callback(err);
|
||||
});
|
||||
}
|
||||
return callback(err);
|
||||
});
|
||||
|
@ -173,7 +173,7 @@ class DataWrapper {
|
|||
// meantime, we at least log the location of the data we are
|
||||
// about to delete before attempting its deletion.
|
||||
if (this._shouldSkipDelete(locations, requestMethod,
|
||||
newObjDataStoreName)) {
|
||||
newObjDataStoreName)) {
|
||||
return process.nextTick(cb);
|
||||
}
|
||||
log.trace('initiating batch delete', {
|
||||
|
@ -210,7 +210,7 @@ class DataWrapper {
|
|||
err => {
|
||||
if (err) {
|
||||
log.end().error('batch delete failed', { error: err });
|
||||
// deletion of non-existing objects result in 204
|
||||
// deletion of non-existing objects result in 204
|
||||
if (err.code === 404) {
|
||||
return cb();
|
||||
}
|
||||
|
@ -233,27 +233,27 @@ class DataWrapper {
|
|||
return cb(null, defResp);
|
||||
}
|
||||
return this.client.healthcheck(flightCheckOnStartUp, log,
|
||||
(err, result) => {
|
||||
let respBody = {};
|
||||
if (err) {
|
||||
log.error(`error from ${this.implName}`, { error: err });
|
||||
(err, result) => {
|
||||
let respBody = {};
|
||||
if (err) {
|
||||
log.error(`error from ${this.implName}`, { error: err });
|
||||
respBody[this.implName] = {
|
||||
error: err,
|
||||
};
|
||||
// error returned as null so async parallel doesn't return
|
||||
// before all backends are checked
|
||||
return cb(null, respBody);
|
||||
}
|
||||
if (this.implName === 'multipleBackends') {
|
||||
respBody = result;
|
||||
return cb(null, respBody);
|
||||
}
|
||||
respBody[this.implName] = {
|
||||
error: err,
|
||||
code: result.statusCode,
|
||||
message: result.statusMessage,
|
||||
};
|
||||
// error returned as null so async parallel doesn't return
|
||||
// before all backends are checked
|
||||
return cb(null, respBody);
|
||||
}
|
||||
if (this.implName === 'multipleBackends') {
|
||||
respBody = result;
|
||||
return cb(null, respBody);
|
||||
}
|
||||
respBody[this.implName] = {
|
||||
code: result.statusCode,
|
||||
message: result.statusMessage,
|
||||
};
|
||||
return cb(null, respBody);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
getDiskUsage(log, cb) {
|
||||
|
@ -291,36 +291,36 @@ class DataWrapper {
|
|||
* @returns {function} cb - callback
|
||||
*/
|
||||
copyObject(request, sourceLocationConstraintName, storeMetadataParams,
|
||||
dataLocator, dataStoreContext, destBackendInfo, sourceBucketMD,
|
||||
destBucketMD, serverSideEncryption, log, cb) {
|
||||
dataLocator, dataStoreContext, destBackendInfo, sourceBucketMD,
|
||||
destBucketMD, serverSideEncryption, log, cb) {
|
||||
if (this.config.backends.data === 'multiple' &&
|
||||
backendUtils.externalBackendCopy(this.config,
|
||||
sourceLocationConstraintName, storeMetadataParams.dataStoreName,
|
||||
sourceBucketMD, destBucketMD)
|
||||
sourceLocationConstraintName, storeMetadataParams.dataStoreName,
|
||||
sourceBucketMD, destBucketMD)
|
||||
&& serverSideEncryption === null) {
|
||||
const destLocationConstraintName =
|
||||
storeMetadataParams.dataStoreName;
|
||||
const objectGetInfo = dataLocator[0];
|
||||
const externalSourceKey = objectGetInfo.key;
|
||||
return this.client.copyObject(request, destLocationConstraintName,
|
||||
externalSourceKey, sourceLocationConstraintName,
|
||||
storeMetadataParams, this.config, log,
|
||||
(error, objectRetrievalInfo) => {
|
||||
if (error) {
|
||||
return cb(error);
|
||||
}
|
||||
const putResult = {
|
||||
key: objectRetrievalInfo.key,
|
||||
dataStoreName: objectRetrievalInfo.dataStoreName,
|
||||
dataStoreType: objectRetrievalInfo.dataStoreType,
|
||||
dataStoreVersionId: objectRetrievalInfo.dataStoreVersionId,
|
||||
size: storeMetadataParams.size,
|
||||
dataStoreETag: objectGetInfo.dataStoreETag,
|
||||
start: objectGetInfo.start,
|
||||
};
|
||||
const putResultArr = [putResult];
|
||||
return cb(null, putResultArr);
|
||||
});
|
||||
externalSourceKey, sourceLocationConstraintName,
|
||||
storeMetadataParams, this.config, log,
|
||||
(error, objectRetrievalInfo) => {
|
||||
if (error) {
|
||||
return cb(error);
|
||||
}
|
||||
const putResult = {
|
||||
key: objectRetrievalInfo.key,
|
||||
dataStoreName: objectRetrievalInfo.dataStoreName,
|
||||
dataStoreType: objectRetrievalInfo.dataStoreType,
|
||||
dataStoreVersionId: objectRetrievalInfo.dataStoreVersionId,
|
||||
size: storeMetadataParams.size,
|
||||
dataStoreETag: objectGetInfo.dataStoreETag,
|
||||
start: objectGetInfo.start,
|
||||
};
|
||||
const putResultArr = [putResult];
|
||||
return cb(null, putResultArr);
|
||||
});
|
||||
}
|
||||
|
||||
const that = this;
|
||||
|
@ -330,37 +330,37 @@ class DataWrapper {
|
|||
// copied at once.
|
||||
return async.mapLimit(dataLocator, 1,
|
||||
// eslint-disable-next-line prefer-arrow-callback
|
||||
function copyPart(part, copyCb) {
|
||||
if (part.dataStoreType === 'azure') {
|
||||
const passThrough = new PassThrough();
|
||||
return async.parallel([
|
||||
parallelCb => that.get(part, passThrough, log, err =>
|
||||
parallelCb(err)),
|
||||
parallelCb => that._dataCopyPut(serverSideEncryption,
|
||||
passThrough, part, dataStoreContext,
|
||||
destBackendInfo, log, parallelCb),
|
||||
], (err, res) => {
|
||||
function copyPart(part, copyCb) {
|
||||
if (part.dataStoreType === 'azure') {
|
||||
const passThrough = new PassThrough();
|
||||
return async.parallel([
|
||||
parallelCb => that.get(part, passThrough, log, err =>
|
||||
parallelCb(err)),
|
||||
parallelCb => that._dataCopyPut(serverSideEncryption,
|
||||
passThrough, part, dataStoreContext,
|
||||
destBackendInfo, log, parallelCb),
|
||||
], (err, res) => {
|
||||
if (err) {
|
||||
return copyCb(err);
|
||||
}
|
||||
return copyCb(null, res[1]);
|
||||
});
|
||||
}
|
||||
return that.get(part, null, log, (err, stream) => {
|
||||
if (err) {
|
||||
return copyCb(err);
|
||||
}
|
||||
return copyCb(null, res[1]);
|
||||
return that._dataCopyPut(serverSideEncryption, stream,
|
||||
part, dataStoreContext, destBackendInfo, log, copyCb);
|
||||
});
|
||||
}
|
||||
return that.get(part, null, log, (err, stream) => {
|
||||
}, (err, results) => {
|
||||
if (err) {
|
||||
return copyCb(err);
|
||||
log.debug('error transferring data from source',
|
||||
{ error: err });
|
||||
return cb(err);
|
||||
}
|
||||
return that._dataCopyPut(serverSideEncryption, stream,
|
||||
part, dataStoreContext, destBackendInfo, log, copyCb);
|
||||
return cb(null, results);
|
||||
});
|
||||
}, (err, results) => {
|
||||
if (err) {
|
||||
log.debug('error transferring data from source',
|
||||
{ error: err });
|
||||
return cb(err);
|
||||
}
|
||||
return cb(null, results);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -386,8 +386,8 @@ class DataWrapper {
|
|||
* @returns {function} cb - callback
|
||||
*/
|
||||
uploadPartCopy(request, log, destBucketMD, sourceLocationConstraintName,
|
||||
destLocationConstraintName, dataLocator, dataStoreContext, lcCheckFn,
|
||||
callback) {
|
||||
destLocationConstraintName, dataLocator, dataStoreContext, lcCheckFn,
|
||||
callback) {
|
||||
const serverSideEncryption = destBucketMD.getServerSideEncryption();
|
||||
const lastModified = new Date().toJSON();
|
||||
|
||||
|
@ -425,15 +425,15 @@ class DataWrapper {
|
|||
if (locationTypeMatch && dataLocator.length === 1) {
|
||||
const sourceKey = dataLocator[0].key;
|
||||
return this.client.uploadPartCopy(request,
|
||||
destLocationConstraintName, sourceKey, sourceLocationConstraintName,
|
||||
this.config, log, (error, eTag) => {
|
||||
if (error) {
|
||||
return callback(error);
|
||||
}
|
||||
const doSkip = srcType === 'aws' ? skipError : null;
|
||||
return callback(doSkip, eTag, lastModified,
|
||||
serverSideEncryption);
|
||||
});
|
||||
destLocationConstraintName, sourceKey, sourceLocationConstraintName,
|
||||
this.config, log, (error, eTag) => {
|
||||
if (error) {
|
||||
return callback(error);
|
||||
}
|
||||
const doSkip = srcType === 'aws' ? skipError : null;
|
||||
return callback(doSkip, eTag, lastModified,
|
||||
serverSideEncryption);
|
||||
});
|
||||
}
|
||||
|
||||
const backendInfo = new BackendInfo(this._config,
|
||||
|
@ -448,72 +448,72 @@ class DataWrapper {
|
|||
// in order so can get the ETag of full object
|
||||
return async.forEachOfSeries(dataLocator,
|
||||
// eslint-disable-next-line prefer-arrow-callback
|
||||
function copyPart(part, index, cb) {
|
||||
if (part.dataStoreType === 'azure') {
|
||||
const passThrough = new PassThrough();
|
||||
return async.parallel([
|
||||
next => that.get(part, passThrough, log, err => {
|
||||
if (err) {
|
||||
log.error('error getting data part from Azure',
|
||||
{
|
||||
error: err,
|
||||
method: 'objectPutCopyPart::' +
|
||||
function copyPart(part, index, cb) {
|
||||
if (part.dataStoreType === 'azure') {
|
||||
const passThrough = new PassThrough();
|
||||
return async.parallel([
|
||||
next => that.get(part, passThrough, log, err => {
|
||||
if (err) {
|
||||
log.error('error getting data part from Azure',
|
||||
{
|
||||
error: err,
|
||||
method: 'objectPutCopyPart::' +
|
||||
'multipleBackendGateway.copyPart',
|
||||
});
|
||||
return next(err);
|
||||
});
|
||||
return next(err);
|
||||
}
|
||||
return next();
|
||||
}),
|
||||
next => that._dataCopyPutPart(request,
|
||||
serverSideEncryption, passThrough, part,
|
||||
dataStoreContext, backendInfo, locations, log, next),
|
||||
], err => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
return next();
|
||||
}),
|
||||
next => that._dataCopyPutPart(request,
|
||||
serverSideEncryption, passThrough, part,
|
||||
dataStoreContext, backendInfo, locations, log, next),
|
||||
], err => {
|
||||
return cb();
|
||||
});
|
||||
}
|
||||
return that.get(part, null, log, (err, stream) => {
|
||||
if (err) {
|
||||
log.debug('error getting object part', { error: err });
|
||||
return cb(err);
|
||||
}
|
||||
return cb();
|
||||
});
|
||||
}
|
||||
return that.get(part, null, log, (err, stream) => {
|
||||
if (err) {
|
||||
log.debug('error getting object part', { error: err });
|
||||
return cb(err);
|
||||
}
|
||||
const hashedStream =
|
||||
const hashedStream =
|
||||
new RelayMD5Sum(totalHash, updatedHash => {
|
||||
totalHash = updatedHash;
|
||||
});
|
||||
stream.pipe(hashedStream);
|
||||
stream.pipe(hashedStream);
|
||||
|
||||
// destLocationConstraintName is location of the
|
||||
// destination MPU object
|
||||
return that._dataCopyPutPart(request, serverSideEncryption,
|
||||
hashedStream, part, dataStoreContext, backendInfo,
|
||||
locations, log, cb);
|
||||
});
|
||||
}, err => {
|
||||
// destLocationConstraintName is location of the
|
||||
// destination MPU object
|
||||
return that._dataCopyPutPart(request, serverSideEncryption,
|
||||
hashedStream, part, dataStoreContext, backendInfo,
|
||||
locations, log, cb);
|
||||
});
|
||||
}, err => {
|
||||
// Digest the final combination of all of the part streams
|
||||
if (err && err !== skipError) {
|
||||
log.debug('error transferring data from source',
|
||||
{ error: err, method: 'goGetData' });
|
||||
return callback(err);
|
||||
}
|
||||
if (totalHash) {
|
||||
totalHash = totalHash.digest('hex');
|
||||
} else {
|
||||
totalHash = locations[0].dataStoreETag;
|
||||
}
|
||||
if (err && err === skipError) {
|
||||
return callback(skipError, totalHash, lastModified,
|
||||
serverSideEncryption);
|
||||
}
|
||||
return callback(null, totalHash, lastModified,
|
||||
serverSideEncryption, locations);
|
||||
});
|
||||
if (err && err !== skipError) {
|
||||
log.debug('error transferring data from source',
|
||||
{ error: err, method: 'goGetData' });
|
||||
return callback(err);
|
||||
}
|
||||
if (totalHash) {
|
||||
totalHash = totalHash.digest('hex');
|
||||
} else {
|
||||
totalHash = locations[0].dataStoreETag;
|
||||
}
|
||||
if (err && err === skipError) {
|
||||
return callback(skipError, totalHash, lastModified,
|
||||
serverSideEncryption);
|
||||
}
|
||||
return callback(null, totalHash, lastModified,
|
||||
serverSideEncryption, locations);
|
||||
});
|
||||
}
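uploadPartCopy above threads a single running MD5 across every copied part via RelayMD5Sum and digests it once at the end to build the combined hash. A hedged sketch of that relay pattern follows; it is a simplified stand-in for the real helper, not its implementation.

import { createHash, Hash } from 'crypto';
import { Transform, TransformCallback } from 'stream';

// Pass-through transform that feeds every chunk into a running MD5 hash and
// hands the updated hash object back to the caller, so one digest can span
// several piped part streams.
class RelayMD5Sum extends Transform {
    private hash: Hash;

    constructor(previousHash: Hash | undefined,
        private onUpdate: (hash: Hash) => void) {
        super();
        // Continue the hash started by earlier parts, or start a new one.
        this.hash = previousHash || createHash('md5');
    }

    _transform(chunk: Buffer, _encoding: BufferEncoding, done: TransformCallback) {
        this.hash.update(chunk);
        this.onUpdate(this.hash);   // caller keeps the latest hash object
        this.push(chunk);           // data continues downstream unchanged
        done();
    }
}

// Usage across parts, mirroring the loop above:
//   let totalHash: Hash | undefined;
//   const relay = new RelayMD5Sum(totalHash, updated => { totalHash = updated; });
//   sourceStream.pipe(relay);   // relay output is then written to the destination
//   // after the last part: const combinedHash = totalHash.digest('hex');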
|
||||
|
||||
abortMPU(objectKey, uploadId, location, bucketName, request, destBucket,
|
||||
lcCheckFn, log, callback) {
|
||||
lcCheckFn, log, callback) {
|
||||
if (this.config.backends.data === 'multiple') {
|
||||
// if controlling location constraint is not stored in object
|
||||
// metadata, mpu was initiated in legacy S3C, so need to
|
||||
|
@ -531,13 +531,13 @@ class DataWrapper {
|
|||
}
|
||||
|
||||
return this.client.abortMPU(objectKey, uploadId, location,
|
||||
bucketName, log, callback);
|
||||
bucketName, log, callback);
|
||||
}
|
||||
return callback(null, false);
|
||||
}
|
||||
|
||||
completeMPU(request, mpuInfo, mdInfo, location, userMetadata,
|
||||
contentSettings, tagging, lcCheckFn, log, callback) {
|
||||
contentSettings, tagging, lcCheckFn, log, callback) {
|
||||
const { objectKey, uploadId, jsonList, bucketName, destBucket } =
|
||||
mpuInfo;
|
||||
if (this.config.backends.data === 'multiple') {
|
||||
|
@ -576,30 +576,30 @@ class DataWrapper {
|
|||
} = mpuInfo;
|
||||
if (this.config.backends.data === 'multiple') {
|
||||
return this.client.createMPU(objectKey, metaHeaders,
|
||||
bucketName, websiteRedirectHeader, locConstraint, contentType,
|
||||
cacheControl, contentDisposition, contentEncoding, tagging, log,
|
||||
(err, dataBackendResObj) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
const configLoc =
|
||||
this.config.locationConstraints[locConstraint];
|
||||
if (locConstraint && configLoc && configLoc.type &&
|
||||
constants.versioningNotImplBackends[configLoc.type]) {
|
||||
const vcfg = destinationBucket.getVersioningConfiguration();
|
||||
const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
|
||||
if (isVersionedObj) {
|
||||
log.debug(externalVersioningErrorMessage,
|
||||
{ method: 'initiateMultipartUpload',
|
||||
error: errors.NotImplemented });
|
||||
return callback(errors.NotImplemented
|
||||
.customizeDescription(
|
||||
externalVersioningErrorMessage),
|
||||
null, isVersionedObj);
|
||||
bucketName, websiteRedirectHeader, locConstraint, contentType,
|
||||
cacheControl, contentDisposition, contentEncoding, tagging, log,
|
||||
(err, dataBackendResObj) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
}
|
||||
return callback(null, dataBackendResObj);
|
||||
});
|
||||
const configLoc =
|
||||
this.config.locationConstraints[locConstraint];
|
||||
if (locConstraint && configLoc && configLoc.type &&
|
||||
constants.versioningNotImplBackends[configLoc.type]) {
|
||||
const vcfg = destinationBucket.getVersioningConfiguration();
|
||||
const isVersionedObj = vcfg && vcfg.Status === 'Enabled';
|
||||
if (isVersionedObj) {
|
||||
log.debug(externalVersioningErrorMessage,
|
||||
{ method: 'initiateMultipartUpload',
|
||||
error: errors.NotImplemented });
|
||||
return callback(errors.NotImplemented
|
||||
.customizeDescription(
|
||||
externalVersioningErrorMessage),
|
||||
null, isVersionedObj);
|
||||
}
|
||||
}
|
||||
return callback(null, dataBackendResObj);
|
||||
});
|
||||
}
|
||||
return callback();
|
||||
}
|
||||
|
@ -632,19 +632,19 @@ class DataWrapper {
|
|||
location = mpuOverviewObj.controllingLocationConstraint;
|
||||
}
|
||||
return this.client.listParts(objectKey, uploadId,
|
||||
location, bucketName, partNumberMarker, maxParts, log,
|
||||
(err, backendPartList) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
return callback(null, backendPartList);
|
||||
});
|
||||
location, bucketName, partNumberMarker, maxParts, log,
|
||||
(err, backendPartList) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
return callback(null, backendPartList);
|
||||
});
|
||||
}
|
||||
return callback();
|
||||
}
|
||||
|
||||
putPart(request, mpuInfo, streamingV4Params, objectLocationConstraint,
|
||||
lcCheckFn, log, callback) {
|
||||
lcCheckFn, log, callback) {
|
||||
const {
|
||||
stream,
|
||||
destinationBucket,
|
||||
|
@ -668,19 +668,19 @@ class DataWrapper {
|
|||
objectLocationConstraint = backendInfoObj.controllingLC;
|
||||
}
|
||||
return this.client.uploadPart(request,
|
||||
streamingV4Params, stream, size, objectLocationConstraint,
|
||||
objectKey, uploadId, partNumber, bucketName, log,
|
||||
(err, partInfo) => {
|
||||
if (err) {
|
||||
log.error('error putting part to data backend', {
|
||||
error: err,
|
||||
method:
|
||||
streamingV4Params, stream, size, objectLocationConstraint,
|
||||
objectKey, uploadId, partNumber, bucketName, log,
|
||||
(err, partInfo) => {
|
||||
if (err) {
|
||||
log.error('error putting part to data backend', {
|
||||
error: err,
|
||||
method:
|
||||
'objectPutPart::multipleBackendGateway.uploadPart',
|
||||
});
|
||||
return callback(err);
|
||||
}
|
||||
return callback(null, partInfo, objectLocationConstraint);
|
||||
});
|
||||
});
|
||||
return callback(err);
|
||||
}
|
||||
return callback(null, partInfo, objectLocationConstraint);
|
||||
});
|
||||
}
|
||||
return callback();
|
||||
}
|
||||
|
@ -688,7 +688,7 @@ class DataWrapper {
|
|||
objectTagging(method, objectKey, bucket, objectMD, log, callback) {
|
||||
if (this.config.backends.data === 'multiple') {
|
||||
return this.client.objectTagging(method, objectKey,
|
||||
bucket, objectMD, log, err => callback(err));
|
||||
bucket, objectMD, log, err => callback(err));
|
||||
}
|
||||
return callback();
|
||||
}
|
||||
|
@ -716,7 +716,7 @@ class DataWrapper {
|
|||
return callback();
|
||||
}
|
||||
|
||||
/**
|
||||
/**
|
||||
* _putForCopy - put used for copying object
|
||||
* @param {object} cipherBundle - cipher bundle that encrypt the data
|
||||
* @param {object} stream - stream containing the data
|
||||
|
@ -735,34 +735,34 @@ class DataWrapper {
|
|||
* @returns {function} cb - callback
|
||||
*/
|
||||
_putForCopy(cipherBundle, stream, part, dataStoreContext, destBackendInfo,
|
||||
log, cb) {
|
||||
log, cb) {
|
||||
return this.put(cipherBundle, stream, part.size, dataStoreContext,
|
||||
destBackendInfo, log, (error, partRetrievalInfo) => {
|
||||
if (error) {
|
||||
return cb(error);
|
||||
}
|
||||
const partResult = {
|
||||
key: partRetrievalInfo.key,
|
||||
dataStoreName: partRetrievalInfo
|
||||
.dataStoreName,
|
||||
dataStoreType: partRetrievalInfo
|
||||
.dataStoreType,
|
||||
start: part.start,
|
||||
size: part.size,
|
||||
};
|
||||
if (cipherBundle) {
|
||||
partResult.cryptoScheme = cipherBundle.cryptoScheme;
|
||||
partResult.cipheredDataKey = cipherBundle.cipheredDataKey;
|
||||
}
|
||||
if (part.dataStoreETag) {
|
||||
partResult.dataStoreETag = part.dataStoreETag;
|
||||
}
|
||||
if (partRetrievalInfo.dataStoreVersionId) {
|
||||
partResult.dataStoreVersionId =
|
||||
destBackendInfo, log, (error, partRetrievalInfo) => {
|
||||
if (error) {
|
||||
return cb(error);
|
||||
}
|
||||
const partResult = {
|
||||
key: partRetrievalInfo.key,
|
||||
dataStoreName: partRetrievalInfo
|
||||
.dataStoreName,
|
||||
dataStoreType: partRetrievalInfo
|
||||
.dataStoreType,
|
||||
start: part.start,
|
||||
size: part.size,
|
||||
};
|
||||
if (cipherBundle) {
|
||||
partResult.cryptoScheme = cipherBundle.cryptoScheme;
|
||||
partResult.cipheredDataKey = cipherBundle.cipheredDataKey;
|
||||
}
|
||||
if (part.dataStoreETag) {
|
||||
partResult.dataStoreETag = part.dataStoreETag;
|
||||
}
|
||||
if (partRetrievalInfo.dataStoreVersionId) {
|
||||
partResult.dataStoreVersionId =
|
||||
partRetrievalInfo.dataStoreVersionId;
|
||||
}
|
||||
return cb(null, partResult);
|
||||
});
|
||||
}
|
||||
return cb(null, partResult);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -783,17 +783,17 @@ class DataWrapper {
|
|||
* @returns {function} cb - callback
|
||||
*/
|
||||
_dataCopyPut(serverSideEncryption, stream, part, dataStoreContext,
|
||||
destBackendInfo, log, cb) {
|
||||
destBackendInfo, log, cb) {
|
||||
if (serverSideEncryption) {
|
||||
return this.kms.createCipherBundle(serverSideEncryption, log,
|
||||
(err, cipherBundle) => {
|
||||
if (err) {
|
||||
log.debug('error getting cipherBundle');
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
return this._putForCopy(cipherBundle, stream, part,
|
||||
dataStoreContext, destBackendInfo, log, cb);
|
||||
});
|
||||
(err, cipherBundle) => {
|
||||
if (err) {
|
||||
log.debug('error getting cipherBundle');
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
return this._putForCopy(cipherBundle, stream, part,
|
||||
dataStoreContext, destBackendInfo, log, cb);
|
||||
});
|
||||
}
|
||||
// Copied object is not encrypted so just put it
|
||||
// without a cipherBundle
|
||||
|
@ -802,7 +802,7 @@ class DataWrapper {
|
|||
}
|
||||
|
||||
_dataCopyPutPart(request, serverSideEncryption, stream, part,
|
||||
dataStoreContext, destBackendInfo, locations, log, cb) {
|
||||
dataStoreContext, destBackendInfo, locations, log, cb) {
|
||||
const numberPartSize = Number.parseInt(part.size, 10);
|
||||
const partNumber = Number.parseInt(request.query.partNumber, 10);
|
||||
const uploadId = request.query.uploadId;
|
||||
|
@ -811,100 +811,100 @@ class DataWrapper {
|
|||
const destLocationConstraintName = destBackendInfo
|
||||
.getControllingLocationConstraint();
|
||||
if (externalBackends[this.config.locationConstraints
|
||||
[destLocationConstraintName].type]) {
|
||||
[destLocationConstraintName].type]) {
|
||||
return this.client.uploadPart(null, null, stream,
|
||||
numberPartSize, destLocationConstraintName, destObjectKey, uploadId,
|
||||
partNumber, destBucketName, log, (err, partInfo) => {
|
||||
if (err) {
|
||||
log.error('error putting part to AWS', {
|
||||
error: err,
|
||||
method: 'objectPutCopyPart::' +
|
||||
numberPartSize, destLocationConstraintName, destObjectKey, uploadId,
|
||||
partNumber, destBucketName, log, (err, partInfo) => {
|
||||
if (err) {
|
||||
log.error('error putting part to AWS', {
|
||||
error: err,
|
||||
method: 'objectPutCopyPart::' +
|
||||
'multipleBackendGateway.uploadPart',
|
||||
});
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
// skip to end of waterfall because don't need to store
|
||||
// part metadata
|
||||
if (partInfo && partInfo.dataStoreType === 'aws_s3') {
|
||||
});
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
// skip to end of waterfall because don't need to store
|
||||
// part metadata
|
||||
if (partInfo && partInfo.dataStoreType === 'aws_s3') {
|
||||
// if data backend handles MPU, skip to end of waterfall
|
||||
const partResult = {
|
||||
dataStoreETag: partInfo.dataStoreETag,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb(skipError, partInfo.dataStoreETag);
|
||||
} else if (partInfo && partInfo.dataStoreType === 'azure') {
|
||||
const partResult = {
|
||||
key: partInfo.key,
|
||||
dataStoreName: partInfo.dataStoreName,
|
||||
dataStoreETag: partInfo.dataStoreETag,
|
||||
size: numberPartSize,
|
||||
numberSubParts: partInfo.numberSubParts,
|
||||
partNumber: partInfo.partNumber,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb();
|
||||
} else if (partInfo && partInfo.dataStoreType === 'gcp') {
|
||||
const partResult = {
|
||||
key: partInfo.key,
|
||||
dataStoreName: partInfo.dataStoreName,
|
||||
dataStoreETag: partInfo.dataStoreETag,
|
||||
size: numberPartSize,
|
||||
partNumber: partInfo.partNumber,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb();
|
||||
}
|
||||
return cb(skipError);
|
||||
});
|
||||
const partResult = {
|
||||
dataStoreETag: partInfo.dataStoreETag,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb(skipError, partInfo.dataStoreETag);
|
||||
} else if (partInfo && partInfo.dataStoreType === 'azure') {
|
||||
const partResult = {
|
||||
key: partInfo.key,
|
||||
dataStoreName: partInfo.dataStoreName,
|
||||
dataStoreETag: partInfo.dataStoreETag,
|
||||
size: numberPartSize,
|
||||
numberSubParts: partInfo.numberSubParts,
|
||||
partNumber: partInfo.partNumber,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb();
|
||||
} else if (partInfo && partInfo.dataStoreType === 'gcp') {
|
||||
const partResult = {
|
||||
key: partInfo.key,
|
||||
dataStoreName: partInfo.dataStoreName,
|
||||
dataStoreETag: partInfo.dataStoreETag,
|
||||
size: numberPartSize,
|
||||
partNumber: partInfo.partNumber,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb();
|
||||
}
|
||||
return cb(skipError);
|
||||
});
|
||||
}
|
||||
if (serverSideEncryption) {
|
||||
return this.kms.createCipherBundle(serverSideEncryption, log,
|
||||
(err, cipherBundle) => {
|
||||
if (err) {
|
||||
log.debug('error getting cipherBundle', { error: err });
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
return this.put(cipherBundle, stream, numberPartSize,
|
||||
dataStoreContext, destBackendInfo, log,
|
||||
(error, partRetrievalInfo, hashedStream) => {
|
||||
if (error) {
|
||||
log.debug('error putting encrypted part', { error });
|
||||
return cb(error);
|
||||
(err, cipherBundle) => {
|
||||
if (err) {
|
||||
log.debug('error getting cipherBundle', { error: err });
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
const partResult = {
|
||||
key: partRetrievalInfo.key,
|
||||
dataStoreName: partRetrievalInfo.dataStoreName,
|
||||
dataStoreETag: hashedStream.completedHash,
|
||||
// Do not include part start here since will change in
|
||||
// final MPU object
|
||||
size: numberPartSize,
|
||||
sseCryptoScheme: cipherBundle.cryptoScheme,
|
||||
sseCipheredDataKey: cipherBundle.cipheredDataKey,
|
||||
sseAlgorithm: cipherBundle.algorithm,
|
||||
sseMasterKeyId: cipherBundle.masterKeyId,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb();
|
||||
return this.put(cipherBundle, stream, numberPartSize,
|
||||
dataStoreContext, destBackendInfo, log,
|
||||
(error, partRetrievalInfo, hashedStream) => {
|
||||
if (error) {
|
||||
log.debug('error putting encrypted part', { error });
|
||||
return cb(error);
|
||||
}
|
||||
const partResult = {
|
||||
key: partRetrievalInfo.key,
|
||||
dataStoreName: partRetrievalInfo.dataStoreName,
|
||||
dataStoreETag: hashedStream.completedHash,
|
||||
// Do not include part start here since will change in
|
||||
// final MPU object
|
||||
size: numberPartSize,
|
||||
sseCryptoScheme: cipherBundle.cryptoScheme,
|
||||
sseCipheredDataKey: cipherBundle.cipheredDataKey,
|
||||
sseAlgorithm: cipherBundle.algorithm,
|
||||
sseMasterKeyId: cipherBundle.masterKeyId,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb();
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
// Copied object is not encrypted so just put it
|
||||
// without a cipherBundle
|
||||
return this.put(null, stream, numberPartSize, dataStoreContext,
|
||||
destBackendInfo, log, (error, partRetrievalInfo, hashedStream) => {
|
||||
if (error) {
|
||||
log.debug('error putting object part', { error });
|
||||
return cb(error);
|
||||
}
|
||||
const partResult = {
|
||||
key: partRetrievalInfo.key,
|
||||
dataStoreName: partRetrievalInfo.dataStoreName,
|
||||
dataStoreETag: hashedStream.completedHash,
|
||||
size: numberPartSize,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb();
|
||||
});
|
||||
destBackendInfo, log, (error, partRetrievalInfo, hashedStream) => {
|
||||
if (error) {
|
||||
log.debug('error putting object part', { error });
|
||||
return cb(error);
|
||||
}
|
||||
const partResult = {
|
||||
key: partRetrievalInfo.key,
|
||||
dataStoreName: partRetrievalInfo.dataStoreName,
|
||||
dataStoreETag: hashedStream.completedHash,
|
||||
size: numberPartSize,
|
||||
};
|
||||
locations.push(partResult);
|
||||
return cb();
|
||||
});
|
||||
}
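_dataCopyPutPart above uses skipError as a sentinel: when the external backend has already handled the part on its side (the aws_s3 branch), the remaining steps are aborted early with that specific error value, and the series callback then treats it as success rather than failure. A hedged sketch of the pattern, assuming async-style eachOfSeries as used in the diff:

import * as async from 'async';

const skipError = new Error('skip');

function copyParts(
    parts: string[],
    copyOnePart: (part: string, done: (err?: Error) => void) => void,
    onDone: (err: Error | null) => void,
) {
    async.eachOfSeries(parts, (part, _index, done) => {
        // copyOnePart may call done(skipError) to short-circuit the series
        // when the backend finished the copy itself.
        copyOnePart(part, done);
    }, err => {
        if (err && err !== skipError) {
            // A real failure: propagate it.
            return onDone(err);
        }
        // Either every part ran, or a step bailed out with skipError.
        return onDone(null);
    });
}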
|
||||
|
||||
_put(cipherBundle, value, valueSize, keyContext, backendInfo, log, cb) {
|
||||
|
@ -930,14 +930,14 @@ class DataWrapper {
|
|||
/* eslint-disable no-param-reassign */
|
||||
keyContext.cipherBundle = cipherBundle;
|
||||
return this.client.put(hashedStream, valueSize, keyContext,
|
||||
backendInfo, log.getSerializedUids(), (err, dataRetrievalInfo) => {
|
||||
if (err) {
|
||||
log.error('put error from datastore',
|
||||
{ error: err, implName: this.implName });
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
});
|
||||
backendInfo, log.getSerializedUids(), (err, dataRetrievalInfo) => {
|
||||
if (err) {
|
||||
log.error('put error from datastore',
|
||||
{ error: err, implName: this.implName });
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
});
|
||||
}
|
||||
/* eslint-enable no-param-reassign */
|
||||
let writeStream = hashedStream;
|
||||
|
@ -947,18 +947,18 @@ class DataWrapper {
|
|||
}
|
||||
|
||||
return this.client.put(writeStream, valueSize, keyContext,
|
||||
log.getSerializedUids(), (err, key) => {
|
||||
if (err) {
|
||||
log.error('put error from datastore',
|
||||
{ error: err, implName: this.implName });
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
const dataRetrievalInfo = {
|
||||
key,
|
||||
dataStoreName: this.implName,
|
||||
};
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
});
|
||||
log.getSerializedUids(), (err, key) => {
|
||||
if (err) {
|
||||
log.error('put error from datastore',
|
||||
{ error: err, implName: this.implName });
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
const dataRetrievalInfo = {
|
||||
key,
|
||||
dataStoreName: this.implName,
|
||||
};
|
||||
return cb(null, dataRetrievalInfo, hashedStream);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -976,25 +976,25 @@ class DataWrapper {
|
|||
return cb(errors.InternalError);
|
||||
}
|
||||
return this.client.delete(objectGetInfo, log.getSerializedUids(),
|
||||
err => {
|
||||
if (err) {
|
||||
if (err.ObjNotFound) {
|
||||
log.info('no such key in datastore', {
|
||||
objectGetInfo,
|
||||
err => {
|
||||
if (err) {
|
||||
if (err.ObjNotFound) {
|
||||
log.info('no such key in datastore', {
|
||||
objectGetInfo,
|
||||
implName: this.implName,
|
||||
moreRetries: 'no',
|
||||
});
|
||||
return cb(err);
|
||||
}
|
||||
log.error('delete error from datastore', {
|
||||
error: err,
|
||||
implName: this.implName,
|
||||
moreRetries: 'no',
|
||||
moreRetries: 'yes',
|
||||
});
|
||||
return cb(err);
|
||||
return this._retryDelete(objectGetInfo, log, count + 1, cb);
|
||||
}
|
||||
log.error('delete error from datastore', {
|
||||
error: err,
|
||||
implName: this.implName,
|
||||
moreRetries: 'yes',
|
||||
});
|
||||
return this._retryDelete(objectGetInfo, log, count + 1, cb);
|
||||
}
|
||||
return cb();
|
||||
});
|
||||
return cb();
|
||||
});
|
||||
}
|
||||
|
||||
// This check is done because on a put, complete mpu or copy request to
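The _retryDelete hunk above keeps the same shape after re-indentation: a missing key (ObjNotFound) is terminal, while any other datastore error is retried with an incremented counter until a cap is reached. A hedged sketch of that bounded-retry shape; MAX_RETRIES and the client type are illustrative assumptions, not the module's actual constants.

const MAX_RETRIES = 5;

interface DeleteClient {
    delete(key: string, cb: (err?: Error & { ObjNotFound?: boolean }) => void): void;
}

function retryDelete(client: DeleteClient, key: string, count: number,
    cb: (err?: Error) => void): void {
    if (count > MAX_RETRIES) {
        // Too many attempts: surface an internal error instead of looping forever.
        return cb(new Error('delete retries exhausted'));
    }
    return client.delete(key, err => {
        if (err) {
            if (err.ObjNotFound) {
                // Nothing left to retry: the key is already gone.
                return cb(err);
            }
            // Transient datastore error ('moreRetries: yes' in the log above):
            // try again with an incremented counter.
            return retryDelete(client, key, count + 1, cb);
        }
        return cb();
    });
}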
|
||||
|
|
|
@ -30,7 +30,7 @@ function parseLC(config, vault) {
|
|||
if (locationObj.details.connector.sproxyd) {
|
||||
clients[location] = new Sproxy({
|
||||
bootstrap: locationObj.details.connector
|
||||
.sproxyd.bootstrap,
|
||||
.sproxyd.bootstrap,
|
||||
// Might be undefined which is ok since there is a default
|
||||
// set in sproxydclient if chordCos is undefined
|
||||
chordCos: locationObj.details.connector.sproxyd.chordCos,
|
||||
|
@ -60,7 +60,7 @@ function parseLC(config, vault) {
|
|||
// keepalive config
|
||||
const httpAgentConfig =
|
||||
config.externalBackends[locationObj.type].httpAgent;
|
||||
// max sockets is infinity by default and expressed as null
|
||||
// max sockets is infinity by default and expressed as null
|
||||
if (httpAgentConfig.maxSockets === null) {
|
||||
httpAgentConfig.maxSockets = undefined;
|
||||
}
|
||||
|
@ -131,7 +131,7 @@ function parseLC(config, vault) {
|
|||
const azureStorageEndpoint = config.getAzureEndpoint(location);
|
||||
const proxyParams =
|
||||
backendUtils.proxyCompareUrl(azureStorageEndpoint) ?
|
||||
{} : config.outboundProxy;
|
||||
{} : config.outboundProxy;
|
||||
const azureStorageCredentials =
|
||||
config.getAzureStorageCredentials(location);
|
||||
clients[location] = new AzureClient({
|
||||
|
|
|
@ -52,26 +52,26 @@ class MultipleBackendGateway {
|
|||
}
|
||||
}
|
||||
return client.put(writeStream, size, keyContext, reqUids,
|
||||
(err, key, dataStoreVersionId, dataStoreSize, dataStoreMD5) => {
|
||||
const log = createLogger(reqUids);
|
||||
log.debug('put to location', { controllingLocationConstraint });
|
||||
if (err) {
|
||||
log.error('error from datastore',
|
||||
{ error: err, dataStoreType: client.clientType });
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
const dataRetrievalInfo = {
|
||||
key,
|
||||
dataStoreName: controllingLocationConstraint,
|
||||
dataStoreType: client.clientType,
|
||||
dataStoreVersionId,
|
||||
dataStoreSize,
|
||||
dataStoreMD5,
|
||||
};
|
||||
return callback(null, dataRetrievalInfo);
|
||||
(err, key, dataStoreVersionId, dataStoreSize, dataStoreMD5) => {
|
||||
const log = createLogger(reqUids);
|
||||
log.debug('put to location', { controllingLocationConstraint });
|
||||
if (err) {
|
||||
log.error('error from datastore',
|
||||
{ error: err, dataStoreType: client.clientType });
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
const dataRetrievalInfo = {
|
||||
key,
|
||||
dataStoreName: controllingLocationConstraint,
|
||||
dataStoreType: client.clientType,
|
||||
dataStoreVersionId,
|
||||
dataStoreSize,
|
||||
dataStoreMD5,
|
||||
};
|
||||
return callback(null, dataRetrievalInfo);
|
||||
// sproxyd accepts keyschema, send as null so sproxyd generates key
|
||||
// send metadata as param for AzureClient in Arsenal
|
||||
}, null, this.metadata);
|
||||
}, null, this.metadata);
|
||||
}
|
||||
|
||||
head(objectGetInfoArr, reqUids, callback) {
|
||||
|
@ -166,14 +166,14 @@ class MultipleBackendGateway {
|
|||
}, () => {
|
||||
async.parallel([
|
||||
next => checkExternalBackend(
|
||||
this.clients, awsArray, 'aws_s3', flightCheckOnStartUp,
|
||||
externalBackendHealthCheckInterval, next),
|
||||
this.clients, awsArray, 'aws_s3', flightCheckOnStartUp,
|
||||
externalBackendHealthCheckInterval, next),
|
||||
next => checkExternalBackend(
|
||||
this.clients, azureArray, 'azure', flightCheckOnStartUp,
|
||||
externalBackendHealthCheckInterval, next),
|
||||
this.clients, azureArray, 'azure', flightCheckOnStartUp,
|
||||
externalBackendHealthCheckInterval, next),
|
||||
next => checkExternalBackend(
|
||||
this.clients, gcpArray, 'gcp', flightCheckOnStartUp,
|
||||
externalBackendHealthCheckInterval, next),
|
||||
this.clients, gcpArray, 'gcp', flightCheckOnStartUp,
|
||||
externalBackendHealthCheckInterval, next),
|
||||
], (errNull, externalResp) => {
|
||||
const externalLocResults = [];
|
||||
externalResp.forEach(resp => externalLocResults.push(...resp));
|
||||
|
@ -185,19 +185,19 @@ class MultipleBackendGateway {
|
|||
}
|
||||
|
||||
createMPU(key, metaHeaders, bucketName, websiteRedirectHeader,
|
||||
location, contentType, cacheControl, contentDisposition,
|
||||
contentEncoding, tagging, log, cb) {
|
||||
location, contentType, cacheControl, contentDisposition,
|
||||
contentEncoding, tagging, log, cb) {
|
||||
const client = this.clients[location];
|
||||
if (client.clientType === 'aws_s3' || client.clientType === 'gcp') {
|
||||
return client.createMPU(key, metaHeaders, bucketName,
|
||||
websiteRedirectHeader, contentType, cacheControl,
|
||||
contentDisposition, contentEncoding, tagging, log, cb);
|
||||
websiteRedirectHeader, contentType, cacheControl,
|
||||
contentDisposition, contentEncoding, tagging, log, cb);
|
||||
}
|
||||
return cb();
|
||||
}
|
||||
|
||||
uploadPart(request, streamingV4Params, stream, size, location, key,
|
||||
uploadId, partNumber, bucketName, log, cb) {
|
||||
uploadId, partNumber, bucketName, log, cb) {
|
||||
const client = this.clients[location];
|
||||
|
||||
if (client.uploadPart) {
|
||||
|
@ -206,29 +206,29 @@ class MultipleBackendGateway {
|
|||
return cb(err);
|
||||
}
|
||||
return client.uploadPart(request, streamingV4Params, stream,
|
||||
size, key, uploadId, partNumber, bucketName, log,
|
||||
(err, partInfo) => {
|
||||
if (err) {
|
||||
size, key, uploadId, partNumber, bucketName, log,
|
||||
(err, partInfo) => {
|
||||
if (err) {
|
||||
// if error putting part, counter should be decremented
|
||||
return this.locStorageCheckFn(location, -size, log,
|
||||
error => {
|
||||
if (error) {
|
||||
log.error('Error decrementing location ' +
|
||||
return this.locStorageCheckFn(location, -size, log,
|
||||
error => {
|
||||
if (error) {
|
||||
log.error('Error decrementing location ' +
|
||||
'metric following object PUT failure',
|
||||
{ error: error.message });
|
||||
}
|
||||
return cb(err);
|
||||
});
|
||||
}
|
||||
return cb(null, partInfo);
|
||||
});
|
||||
{ error: error.message });
|
||||
}
|
||||
return cb(err);
|
||||
});
|
||||
}
|
||||
return cb(null, partInfo);
|
||||
});
|
||||
});
|
||||
}
|
||||
return cb();
|
||||
}
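The put and uploadPart error paths above (and copyObject below) repeat the same rollback: the location storage counter is incremented before the write, so a failed write must subtract the same size again, log any error from that decrement, and still hand the caller the original write error. A hedged sketch of the pattern; the locationStorageCheck signature and logger shape are assumptions.

type Logger = { error(msg: string, fields?: object): void };
type LocStorageCheckFn =
    (location: string, sizeDelta: number, log: Logger, cb: (err?: Error) => void) => void;

function rollbackLocationMetric(
    locStorageCheckFn: LocStorageCheckFn,
    location: string,
    size: number,
    log: Logger,
    originalErr: Error,
    cb: (err: Error) => void,
): void {
    // Subtract the size that was optimistically added before the write.
    locStorageCheckFn(location, -size, log, rollbackErr => {
        if (rollbackErr) {
            // Rollback failures are only logged; the caller still receives the
            // original write error, not the metric error.
            log.error('Error decrementing location metric following ' +
                'object PUT failure', { error: rollbackErr.message });
        }
        return cb(originalErr);
    });
}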
|
||||
|
||||
listParts(key, uploadId, location, bucketName, partNumberMarker, maxParts,
|
||||
log, cb) {
|
||||
log, cb) {
|
||||
const client = this.clients[location];
|
||||
|
||||
if (client.listParts) {
|
||||
|
@ -239,7 +239,7 @@ class MultipleBackendGateway {
|
|||
}
|
||||
|
||||
completeMPU(key, uploadId, location, jsonList, mdInfo, bucketName,
|
||||
userMetadata, contentSettings, tagging, log, cb) {
|
||||
userMetadata, contentSettings, tagging, log, cb) {
|
||||
const client = this.clients[location];
|
||||
if (client.completeMPU) {
|
||||
const args = [jsonList, mdInfo, key, uploadId, bucketName];
|
||||
|
@ -291,40 +291,40 @@ class MultipleBackendGateway {
|
|||
// NOTE: using copyObject only if copying object from one external
|
||||
// backend to the same external backend
|
||||
copyObject(request, destLocationConstraintName, externalSourceKey,
|
||||
sourceLocationConstraintName, storeMetadataParams, config, log, cb) {
|
||||
sourceLocationConstraintName, storeMetadataParams, config, log, cb) {
|
||||
const client = this.clients[destLocationConstraintName];
|
||||
if (client.copyObject) {
|
||||
return this.locStorageCheckFn(destLocationConstraintName,
|
||||
storeMetadataParams.size, log, err => {
|
||||
if (err) {
|
||||
cb(err);
|
||||
}
|
||||
return client.copyObject(request, destLocationConstraintName,
|
||||
externalSourceKey, sourceLocationConstraintName,
|
||||
storeMetadataParams, config, log,
|
||||
(err, key, dataStoreVersionId) => {
|
||||
const dataRetrievalInfo = {
|
||||
key,
|
||||
dataStoreName: destLocationConstraintName,
|
||||
dataStoreType: client.clientType,
|
||||
dataStoreVersionId,
|
||||
};
|
||||
storeMetadataParams.size, log, err => {
|
||||
if (err) {
|
||||
// if error copying obj, counter should be decremented
|
||||
return this.locStorageCheckFn(
|
||||
destLocationConstraintName, -storeMetadataParams.size,
|
||||
log, error => {
|
||||
if (error) {
|
||||
log.error('Error decrementing location ' +
|
||||
'metric following object PUT failure',
|
||||
{ error: error.message });
|
||||
}
|
||||
return cb(err);
|
||||
});
|
||||
cb(err);
|
||||
}
|
||||
return cb(null, dataRetrievalInfo);
|
||||
return client.copyObject(request, destLocationConstraintName,
|
||||
externalSourceKey, sourceLocationConstraintName,
|
||||
storeMetadataParams, config, log,
|
||||
(err, key, dataStoreVersionId) => {
|
||||
const dataRetrievalInfo = {
|
||||
key,
|
||||
dataStoreName: destLocationConstraintName,
|
||||
dataStoreType: client.clientType,
|
||||
dataStoreVersionId,
|
||||
};
|
||||
if (err) {
|
||||
// if error copying obj, counter should be decremented
|
||||
return this.locStorageCheckFn(
|
||||
destLocationConstraintName, -storeMetadataParams.size,
|
||||
log, error => {
|
||||
if (error) {
|
||||
log.error('Error decrementing location ' +
|
||||
'metric following object PUT failure',
|
||||
{ error: error.message });
|
||||
}
|
||||
return cb(err);
|
||||
});
|
||||
}
|
||||
return cb(null, dataRetrievalInfo);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
return cb(errors.NotImplemented
|
||||
.customizeDescription('Can not copy object from ' +
|
||||
|
@ -332,15 +332,15 @@ class MultipleBackendGateway {
|
|||
}
|
||||
|
||||
uploadPartCopy(request, location, awsSourceKey,
|
||||
sourceLocationConstraintName, config, log, cb) {
|
||||
sourceLocationConstraintName, config, log, cb) {
|
||||
const client = this.clients[location];
|
||||
if (client.uploadPartCopy) {
|
||||
return client.uploadPartCopy(request, awsSourceKey,
|
||||
sourceLocationConstraintName, config,
|
||||
log, cb);
|
||||
sourceLocationConstraintName, config,
|
||||
log, cb);
|
||||
}
|
||||
return cb(errors.NotImplemented.customizeDescription(
|
||||
'Can not copy object from ' +
|
||||
'Can not copy object from ' +
|
||||
`${client.clientType} to ${client.clientType}`));
|
||||
}
|
||||
|
||||
|
@ -348,7 +348,7 @@ class MultipleBackendGateway {
|
|||
const client = this.clients[location];
|
||||
if (client.protectAzureBlocks) {
|
||||
return client.protectAzureBlocks(this.metadata, bucketName,
|
||||
objectKey, location, log, cb);
|
||||
objectKey, location, log, cb);
|
||||
}
|
||||
return cb();
|
||||
}
|
||||
|
|
|
@@ -11,7 +11,7 @@ const { createLogger, logHelper, removeQuotes, trimXMetaPrefix } =

const missingVerIdInternalError = errors.InternalError.customizeDescription(
    'Invalid state. Please ensure versioning is enabled ' +
    'in AWS for the location constraint and try again.'
    'in AWS for the location constraint and try again.',
);

class AwsClient {
@ -36,35 +36,35 @@ class AwsClient {
|
|||
// this request implicitly updates the endpoint for the location
|
||||
// the following code explcitly sets it to avoid surprises
|
||||
this._client.getBucketLocation({ Bucket: this._awsBucketName },
|
||||
(err, res) => {
|
||||
if (err && err.code !== 'AuthorizationHeaderMalformed') {
|
||||
this._logger.error('error during setup', {
|
||||
error: err,
|
||||
method: 'AwsClient.setup',
|
||||
});
|
||||
return cb(err);
|
||||
}
|
||||
let region;
|
||||
if (err && err.code === 'AuthorizationHeaderMalformed') {
|
||||
(err, res) => {
|
||||
if (err && err.code !== 'AuthorizationHeaderMalformed') {
|
||||
this._logger.error('error during setup', {
|
||||
error: err,
|
||||
method: 'AwsClient.setup',
|
||||
});
|
||||
return cb(err);
|
||||
}
|
||||
let region;
|
||||
if (err && err.code === 'AuthorizationHeaderMalformed') {
|
||||
// set regional endpoint
|
||||
region = err.region;
|
||||
} else if (res) {
|
||||
region = res.LocationConstraint;
|
||||
}
|
||||
this._client.config.update({ region });
|
||||
region = err.region;
|
||||
} else if (res) {
|
||||
region = res.LocationConstraint;
|
||||
}
|
||||
this._client.config.update({ region });
|
||||
|
||||
const isAWS = this._s3Params.endpoint.endsWith('amazonaws.com');
|
||||
if (region && isAWS) {
|
||||
const endpoint = `s3.${region}.amazonaws.com`;
|
||||
this._logger.debug('setting regional endpoint', {
|
||||
method: 'AwsClient.setup',
|
||||
region,
|
||||
endpoint,
|
||||
});
|
||||
this._client.endpoint = new AWS.Endpoint(endpoint);
|
||||
}
|
||||
return cb();
|
||||
});
|
||||
const isAWS = this._s3Params.endpoint.endsWith('amazonaws.com');
|
||||
if (region && isAWS) {
|
||||
const endpoint = `s3.${region}.amazonaws.com`;
|
||||
this._logger.debug('setting regional endpoint', {
|
||||
method: 'AwsClient.setup',
|
||||
region,
|
||||
endpoint,
|
||||
});
|
||||
this._client.endpoint = new AWS.Endpoint(endpoint);
|
||||
}
|
||||
return cb();
|
||||
});
|
||||
}
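The setup() hunk above is re-indented but unchanged in logic: it asks S3 for the bucket's location, falls back to the region AWS reports inside an AuthorizationHeaderMalformed error, and then points the client at the matching regional endpoint, but only when talking to real AWS. A hedged sketch with the AWS SDK v2 client assumed, as in the diff; the region field on the error object is untyped and is exactly what the code above relies on.

import * as AWS from 'aws-sdk';

function setRegionalEndpoint(
    client: AWS.S3,
    bucketName: string,
    configuredEndpoint: string,
    cb: (err?: Error) => void,
) {
    client.getBucketLocation({ Bucket: bucketName }, (err, res) => {
        if (err && err.code !== 'AuthorizationHeaderMalformed') {
            return cb(err);
        }
        let region: string | undefined;
        if (err && err.code === 'AuthorizationHeaderMalformed') {
            // AWS rejects the request but reports which region it expected.
            region = (err as AWS.AWSError & { region?: string }).region;
        } else if (res) {
            region = res.LocationConstraint;
        }
        client.config.update({ region });
        const isAWS = configuredEndpoint.endsWith('amazonaws.com');
        if (region && isAWS) {
            // Only rewrite the endpoint for real AWS; S3-compatible backends
            // keep whatever endpoint was configured.
            client.endpoint = new AWS.Endpoint(`s3.${region}.amazonaws.com`);
        }
        return cb();
    });
}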
|
||||
|
||||
_createAwsKey(requestBucketName, requestObjectKey,
|
||||
|
@ -84,23 +84,23 @@ class AwsClient {
|
|||
|
||||
put(stream, size, keyContext, reqUids, callback) {
|
||||
const awsKey = this._createAwsKey(keyContext.bucketName,
|
||||
keyContext.objectKey, this._bucketMatch);
|
||||
keyContext.objectKey, this._bucketMatch);
|
||||
const metaHeaders = trimXMetaPrefix(keyContext.metaHeaders);
|
||||
const log = createLogger(reqUids);
|
||||
|
||||
const putCb = (err, data) => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from data backend',
|
||||
err, this._dataStoreName, this.clientType);
|
||||
err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
if (!data.VersionId && this._supportsVersioning) {
|
||||
logHelper(log, 'error', 'missing version id for data ' +
|
||||
'backend object', missingVerIdInternalError,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(missingVerIdInternalError);
|
||||
}
|
||||
const dataStoreVersionId = data.VersionId;
|
||||
|
@ -180,8 +180,8 @@ class AwsClient {
|
|||
Range: range ? `bytes=${range[0]}-${range[1]}` : null,
|
||||
}).on('success', response => {
|
||||
log.trace(`${this.type} GET request response headers`,
|
||||
{ responseHeaders: response.httpResponse.headers,
|
||||
backendType: this.clientType });
|
||||
{ responseHeaders: response.httpResponse.headers,
|
||||
backendType: this.clientType });
|
||||
});
|
||||
const stream = request.createReadStream();
|
||||
|
||||
|
@ -202,8 +202,8 @@ class AwsClient {
|
|||
logLevel = 'error';
|
||||
}
|
||||
logHelper(log, logLevel,
|
||||
`error streaming data from ${this.type}`,
|
||||
err, this._dataStoreName, this.clientType);
|
||||
`error streaming data from ${this.type}`,
|
||||
err, this._dataStoreName, this.clientType);
|
||||
});
|
||||
// Always call the callback asynchronously: the caller may
|
||||
// destroy the stream with destroy(), which MUST be
|
||||
|
@ -232,8 +232,8 @@ class AwsClient {
|
|||
return callback();
|
||||
}
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
return callback();
|
||||
|
@ -243,39 +243,39 @@ class AwsClient {
|
|||
healthcheck(location, callback) {
|
||||
const awsResp = {};
|
||||
this._client.headBucket({ Bucket: this._awsBucketName },
|
||||
err => {
|
||||
err => {
|
||||
/* eslint-disable no-param-reassign */
|
||||
if (err) {
|
||||
awsResp[location] = { error: err, external: true };
|
||||
return callback(null, awsResp);
|
||||
}
|
||||
if (!this._supportsVersioning) {
|
||||
awsResp[location] = {
|
||||
message: 'Congrats! You own the bucket',
|
||||
};
|
||||
return callback(null, awsResp);
|
||||
}
|
||||
return this._client.getBucketVersioning({
|
||||
Bucket: this._awsBucketName },
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
awsResp[location] = { error: err, external: true };
|
||||
} else if (!data.Status ||
|
||||
data.Status === 'Suspended') {
|
||||
return callback(null, awsResp);
|
||||
}
|
||||
if (!this._supportsVersioning) {
|
||||
awsResp[location] = {
|
||||
versioningStatus: data.Status,
|
||||
error: 'Versioning must be enabled',
|
||||
external: true,
|
||||
};
|
||||
} else {
|
||||
awsResp[location] = {
|
||||
versioningStatus: data.Status,
|
||||
message: 'Congrats! You own the bucket',
|
||||
};
|
||||
return callback(null, awsResp);
|
||||
}
|
||||
return callback(null, awsResp);
|
||||
return this._client.getBucketVersioning({
|
||||
Bucket: this._awsBucketName },
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
awsResp[location] = { error: err, external: true };
|
||||
} else if (!data.Status ||
|
||||
data.Status === 'Suspended') {
|
||||
awsResp[location] = {
|
||||
versioningStatus: data.Status,
|
||||
error: 'Versioning must be enabled',
|
||||
external: true,
|
||||
};
|
||||
} else {
|
||||
awsResp[location] = {
|
||||
versioningStatus: data.Status,
|
||||
message: 'Congrats! You own the bucket',
|
||||
};
|
||||
}
|
||||
return callback(null, awsResp);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
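The healthcheck() hunk above keeps the same decision tree after re-indentation: headBucket proves the bucket is reachable, and for backends that support versioning, getBucketVersioning must report Enabled before the location is considered healthy. A hedged sketch of that tree; the response shape mirrors the diff and the AWS SDK v2 client is assumed.

import * as AWS from 'aws-sdk';

type LocationHealth = {
    error?: unknown;
    external?: boolean;
    versioningStatus?: string;
    message?: string;
};

function healthcheckLocation(
    client: AWS.S3,
    bucketName: string,
    supportsVersioning: boolean,
    cb: (err: null, resp: LocationHealth) => void,
) {
    client.headBucket({ Bucket: bucketName }, err => {
        if (err) {
            // Errors are reported in the body, not as callback errors, so an
            // aggregate healthcheck can still collect every backend's status.
            return cb(null, { error: err, external: true });
        }
        if (!supportsVersioning) {
            return cb(null, { message: 'Congrats! You own the bucket' });
        }
        return client.getBucketVersioning({ Bucket: bucketName }, (err, data) => {
            if (err) {
                return cb(null, { error: err, external: true });
            }
            if (!data.Status || data.Status === 'Suspended') {
                // Reachable, but unusable for versioned workloads.
                return cb(null, {
                    versioningStatus: data.Status,
                    error: 'Versioning must be enabled',
                    external: true,
                });
            }
            return cb(null, {
                versioningStatus: data.Status,
                message: 'Congrats! You own the bucket',
            });
        });
    });
}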
|
||||
|
||||
createMPU(key, metaHeaders, bucketName, websiteRedirectHeader, contentType,
|
||||
|
@ -304,10 +304,10 @@ class AwsClient {
|
|||
return this._client.createMultipartUpload(params, (err, mpuResObj) => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from data backend',
|
||||
err, this._dataStoreName, this.clientType);
|
||||
err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
return callback(null, mpuResObj);
|
||||
|
@ -315,7 +315,7 @@ class AwsClient {
|
|||
}
|
||||
|
||||
uploadPart(request, streamingV4Params, stream, size, key, uploadId,
|
||||
partNumber, bucketName, log, callback) {
|
||||
partNumber, bucketName, log, callback) {
|
||||
let hashedStream = stream;
|
||||
if (request) {
|
||||
const partStream = prepareStream(request, streamingV4Params,
|
||||
|
@ -335,7 +335,7 @@ class AwsClient {
|
|||
'on uploadPart', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
// Because we manually add quotes to ETag later, remove quotes here
|
||||
|
@ -352,7 +352,7 @@ class AwsClient {
|
|||
}
|
||||
|
||||
listParts(key, uploadId, bucketName, partNumberMarker, maxParts, log,
|
||||
callback) {
|
||||
callback) {
|
||||
const awsBucket = this._awsBucketName;
|
||||
const awsKey = this._createAwsKey(bucketName, key, this._bucketMatch);
|
||||
const params = { Bucket: awsBucket, Key: awsKey, UploadId: uploadId,
|
||||
|
@ -360,10 +360,10 @@ class AwsClient {
|
|||
return this._client.listParts(params, (err, partList) => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from data backend on listPart',
|
||||
err, this._dataStoreName, this.clientType);
|
||||
err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
// build storedParts object to mimic Scality S3 backend returns
|
||||
|
@ -424,47 +424,47 @@ class AwsClient {
|
|||
};
|
||||
const completeObjData = { key: awsKey };
|
||||
return this._client.completeMultipartUpload(mpuParams,
|
||||
(err, completeMpuRes) => {
|
||||
if (err) {
|
||||
if (mpuError[err.code]) {
|
||||
logHelper(log, 'trace', 'err from data backend on ' +
|
||||
'completeMPU', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors[err.code]);
|
||||
}
|
||||
logHelper(log, 'error', 'err from data backend on ' +
|
||||
'completeMPU', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
);
|
||||
}
|
||||
if (!completeMpuRes.VersionId && this._supportsVersioning) {
|
||||
logHelper(log, 'error', 'missing version id for data ' +
|
||||
'backend object', missingVerIdInternalError,
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(missingVerIdInternalError);
|
||||
}
|
||||
// need to get content length of new object to store
|
||||
// in our metadata
|
||||
return this._client.headObject({ Bucket: awsBucket, Key: awsKey },
|
||||
(err, objHeaders) => {
|
||||
(err, completeMpuRes) => {
|
||||
if (err) {
|
||||
logHelper(log, 'trace', 'err from data backend on ' +
|
||||
'headObject', err, this._dataStoreName, this.clientType);
|
||||
if (mpuError[err.code]) {
|
||||
logHelper(log, 'trace', 'err from data backend on ' +
|
||||
'completeMPU', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors[err.code]);
|
||||
}
|
||||
logHelper(log, 'error', 'err from data backend on ' +
|
||||
'completeMPU', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
// remove quotes from eTag because they're added later
|
||||
completeObjData.eTag = completeMpuRes.ETag
|
||||
.substring(1, completeMpuRes.ETag.length - 1);
|
||||
completeObjData.dataStoreVersionId = completeMpuRes.VersionId;
|
||||
completeObjData.contentLength =
|
||||
if (!completeMpuRes.VersionId && this._supportsVersioning) {
|
||||
logHelper(log, 'error', 'missing version id for data ' +
|
||||
'backend object', missingVerIdInternalError,
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(missingVerIdInternalError);
|
||||
}
|
||||
// need to get content length of new object to store
|
||||
// in our metadata
|
||||
return this._client.headObject({ Bucket: awsBucket, Key: awsKey },
|
||||
(err, objHeaders) => {
|
||||
if (err) {
|
||||
logHelper(log, 'trace', 'err from data backend on ' +
|
||||
'headObject', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
// remove quotes from eTag because they're added later
|
||||
completeObjData.eTag = completeMpuRes.ETag
|
||||
.substring(1, completeMpuRes.ETag.length - 1);
|
||||
completeObjData.dataStoreVersionId = completeMpuRes.VersionId;
|
||||
completeObjData.contentLength =
|
||||
Number.parseInt(objHeaders.ContentLength, 10);
|
||||
return callback(null, completeObjData);
|
||||
return callback(null, completeObjData);
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
abortMPU(key, uploadId, bucketName, log, callback) {
|
||||
|
@ -480,8 +480,8 @@ class AwsClient {
|
|||
'using the same uploadId.', err, this._dataStoreName,
|
||||
this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
return callback();
|
||||
|
@ -507,10 +507,10 @@ class AwsClient {
|
|||
if (err) {
|
||||
logHelper(log, 'error', 'error from data backend on ' +
|
||||
'putObjectTagging', err,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
return callback();
|
||||
|
@ -532,19 +532,19 @@ class AwsClient {
|
|||
'deleteObjectTagging', err,
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
return callback();
|
||||
});
|
||||
}
|
||||
copyObject(request, destLocationConstraintName, sourceKey,
|
||||
sourceLocationConstraintName, storeMetadataParams, config, log, callback) {
|
||||
sourceLocationConstraintName, storeMetadataParams, config, log, callback) {
|
||||
const destBucketName = request.bucketName;
|
||||
const destObjectKey = request.objectKey;
|
||||
const destAwsKey = this._createAwsKey(destBucketName, destObjectKey,
|
||||
this._bucketMatch);
|
||||
this._bucketMatch);
|
||||
|
||||
const sourceAwsBucketName =
|
||||
config.getAwsBucketName(sourceLocationConstraintName);
|
||||
|
@ -569,15 +569,15 @@ class AwsClient {
|
|||
`${sourceAwsBucketName} ${this.type} bucket`, err,
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.AccessDenied
|
||||
.customizeDescription('Error: Unable to access ' +
|
||||
`${sourceAwsBucketName} ${this.type} bucket`)
|
||||
.customizeDescription('Error: Unable to access ' +
|
||||
`${sourceAwsBucketName} ${this.type} bucket`),
|
||||
);
|
||||
}
|
||||
logHelper(log, 'error', 'error from data backend on ' +
|
||||
'copyObject', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
if (!copyResult.VersionId && this._supportsVersioning) {
|
||||
|
@ -590,7 +590,7 @@ class AwsClient {
|
|||
if (err || !data.VersionId) {
|
||||
logHelper(log, 'error', 'missing version id for data ' +
|
||||
'backend object', missingVerIdInternalError,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(missingVerIdInternalError);
|
||||
}
|
||||
return callback(null, destAwsKey, data.VersionId);
|
||||
|
@ -600,11 +600,11 @@ class AwsClient {
|
|||
});
|
||||
}
|
||||
uploadPartCopy(request, awsSourceKey, sourceLocationConstraintName,
|
||||
config, log, callback) {
|
||||
config, log, callback) {
|
||||
const destBucketName = request.bucketName;
|
||||
const destObjectKey = request.objectKey;
|
||||
const destAwsKey = this._createAwsKey(destBucketName, destObjectKey,
|
||||
this._bucketMatch);
|
||||
this._bucketMatch);
|
||||
|
||||
const sourceAwsBucketName =
|
||||
config.getAwsBucketName(sourceLocationConstraintName);
|
||||
|
@ -628,15 +628,15 @@ class AwsClient {
|
|||
`${sourceAwsBucketName} AWS bucket`, err,
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.AccessDenied
|
||||
.customizeDescription('Error: Unable to access ' +
|
||||
`${sourceAwsBucketName} AWS bucket`)
|
||||
.customizeDescription('Error: Unable to access ' +
|
||||
`${sourceAwsBucketName} AWS bucket`),
|
||||
);
|
||||
}
|
||||
logHelper(log, 'error', 'error from data backend on ' +
|
||||
'uploadPartCopy', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.type}: ${err.message}`),
|
||||
);
|
||||
}
|
||||
const eTag = removeQuotes(res.CopyPartResult.ETag);
|
||||
|
|
|
@ -51,7 +51,7 @@ class AzureClient {
|
|||
if (log) {
|
||||
log.error('error thrown by Azure Storage Client Library',
|
||||
{ error: err.message, stack: err.stack, s3Method,
|
||||
azureMethod, dataStoreName: this._dataStoreName });
|
||||
azureMethod, dataStoreName: this._dataStoreName });
|
||||
}
|
||||
cb(error.customizeDescription('Error from Azure ' +
|
||||
`method: ${azureMethod} on ${s3Method} S3 call: ` +
|
||||
|
@ -82,7 +82,7 @@ class AzureClient {
|
|||
// same key name. If it does, do not allow put or delete because Azure
|
||||
// will delete all blocks with same key name
|
||||
protectAzureBlocks(metadata, bucketName, objectKey, dataStoreName,
|
||||
log, cb) {
|
||||
log, cb) {
|
||||
const mpuBucketName = `${constants.mpuBucketPrefix}${bucketName}`;
|
||||
const splitter = constants.splitter;
|
||||
const listingParams = {
|
||||
|
@ -93,23 +93,23 @@ class AzureClient {
|
|||
};
|
||||
|
||||
return metadata.listMultipartUploads(mpuBucketName, listingParams,
|
||||
log, (err, mpuList) => {
|
||||
if (err && !err.NoSuchBucket) {
|
||||
log.error('Error listing MPUs for Azure delete',
|
||||
{ error: err, dataStoreName });
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
if (mpuList && mpuList.Uploads && mpuList.Uploads.length > 0) {
|
||||
const error = errors.MPUinProgress;
|
||||
log.error('Error: cannot put/delete object to Azure with ' +
|
||||
log, (err, mpuList) => {
|
||||
if (err && !err.NoSuchBucket) {
|
||||
log.error('Error listing MPUs for Azure delete',
|
||||
{ error: err, dataStoreName });
|
||||
return cb(errors.ServiceUnavailable);
|
||||
}
|
||||
if (mpuList && mpuList.Uploads && mpuList.Uploads.length > 0) {
|
||||
const error = errors.MPUinProgress;
|
||||
log.error('Error: cannot put/delete object to Azure with ' +
|
||||
'same key name as ongoing MPU on Azure',
|
||||
{ error, dataStoreName });
|
||||
return cb(error);
|
||||
}
|
||||
// If listMultipartUploads returns a NoSuchBucket error or the
|
||||
// mpu list is empty, there are no conflicting MPUs, so continue
|
||||
return cb();
|
||||
});
|
||||
return cb(error);
|
||||
}
|
||||
// If listMultipartUploads returns a NoSuchBucket error or the
|
||||
// mpu list is empty, there are no conflicting MPUs, so continue
|
||||
return cb();
|
||||
});
|
||||
}
|
||||
|
||||
toObjectGetInfo(objectKey, bucketName) {
|
||||
|
@ -123,52 +123,52 @@ class AzureClient {
|
|||
const log = createLogger(reqUids);
|
||||
// before blob is put, make sure there is no ongoing MPU with same key
|
||||
this.protectAzureBlocks(metadata, keyContext.bucketName,
|
||||
keyContext.objectKey, this._dataStoreName, log, err => {
|
||||
keyContext.objectKey, this._dataStoreName, log, err => {
|
||||
// if error returned, there is ongoing MPU, so do not put
|
||||
if (err) {
|
||||
return callback(err.customizeDescription(
|
||||
`Error putting object to Azure: ${err.message}`));
|
||||
}
|
||||
const azureKey = this._createAzureKey(keyContext.bucketName,
|
||||
keyContext.objectKey, this._bucketMatch);
|
||||
const options = {
|
||||
metadata: translateAzureMetaHeaders(keyContext.metaHeaders,
|
||||
keyContext.tagging),
|
||||
contentSettings: {
|
||||
contentType: keyContext.contentType || undefined,
|
||||
cacheControl: keyContext.cacheControl || undefined,
|
||||
contentDisposition: keyContext.contentDisposition ||
|
||||
if (err) {
|
||||
return callback(err.customizeDescription(
|
||||
`Error putting object to Azure: ${err.message}`));
|
||||
}
|
||||
const azureKey = this._createAzureKey(keyContext.bucketName,
|
||||
keyContext.objectKey, this._bucketMatch);
|
||||
const options = {
|
||||
metadata: translateAzureMetaHeaders(keyContext.metaHeaders,
|
||||
keyContext.tagging),
|
||||
contentSettings: {
|
||||
contentType: keyContext.contentType || undefined,
|
||||
cacheControl: keyContext.cacheControl || undefined,
|
||||
contentDisposition: keyContext.contentDisposition ||
|
||||
undefined,
|
||||
contentEncoding: keyContext.contentEncoding || undefined,
|
||||
},
|
||||
};
|
||||
if (size === 0) {
|
||||
return this._errorWrapper('put', 'createBlockBlobFromText',
|
||||
[this._azureContainerName, azureKey, '', options,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure PUT data ' +
|
||||
contentEncoding: keyContext.contentEncoding || undefined,
|
||||
},
|
||||
};
|
||||
if (size === 0) {
|
||||
return this._errorWrapper('put', 'createBlockBlobFromText',
|
||||
[this._azureContainerName, azureKey, '', options,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure PUT data ' +
|
||||
'backend', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`Azure: ${err.message}`));
|
||||
}
|
||||
return callback(null, azureKey);
|
||||
}], log, callback);
|
||||
}
|
||||
return this._errorWrapper('put', 'createBlockBlobFromStream',
|
||||
[this._azureContainerName, azureKey, stream, size, options,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure PUT data ' +
|
||||
}
|
||||
return callback(null, azureKey);
|
||||
}], log, callback);
|
||||
}
|
||||
return this._errorWrapper('put', 'createBlockBlobFromStream',
|
||||
[this._azureContainerName, azureKey, stream, size, options,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure PUT data ' +
|
||||
'backend', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`Azure: ${err.message}`));
|
||||
}
|
||||
return callback(null, azureKey);
|
||||
}], log, callback);
|
||||
});
|
||||
}
|
||||
return callback(null, azureKey);
|
||||
}], log, callback);
|
||||
});
|
||||
}
|
||||
|
||||
head(objectGetInfo, reqUids, callback) {
|
||||
|
@ -176,25 +176,25 @@ class AzureClient {
|
|||
const { key, azureStreamingOptions } = objectGetInfo;
|
||||
return this._errorWrapper('head', 'getBlobProperties',
|
||||
[this._azureContainerName, key, azureStreamingOptions,
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
let logLevel;
|
||||
let retError;
|
||||
if (err.code === 'NotFound') {
|
||||
logLevel = 'info';
|
||||
retError = errors.LocationNotFound;
|
||||
} else {
|
||||
logLevel = 'error';
|
||||
retError = errors.ServiceUnavailable
|
||||
.customizeDescription(
|
||||
`Error returned from Azure: ${err.message}`);
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
let logLevel;
|
||||
let retError;
|
||||
if (err.code === 'NotFound') {
|
||||
logLevel = 'info';
|
||||
retError = errors.LocationNotFound;
|
||||
} else {
|
||||
logLevel = 'error';
|
||||
retError = errors.ServiceUnavailable
|
||||
.customizeDescription(
|
||||
`Error returned from Azure: ${err.message}`);
|
||||
}
|
||||
logHelper(log, logLevel, 'err from Azure HEAD data backend',
|
||||
err, this._dataStoreName);
|
||||
return callback(retError);
|
||||
}
|
||||
logHelper(log, logLevel, 'err from Azure HEAD data backend',
|
||||
err, this._dataStoreName);
|
||||
return callback(retError);
|
||||
}
|
||||
return callback(null, data);
|
||||
}], log, callback);
|
||||
return callback(null, data);
|
||||
}], log, callback);
|
||||
}
|
||||
|
||||
get(objectGetInfo, range, reqUids, callback) {
|
||||
|
@ -213,14 +213,14 @@ class AzureClient {
|
|||
}
|
||||
this._errorWrapper('get', 'getBlobToStream',
|
||||
[this._azureContainerName, key, response, streamingOptions,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure GET data backend',
|
||||
err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
return callback(null, response);
|
||||
}], log, callback);
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from Azure GET data backend',
|
||||
err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
return callback(null, response);
|
||||
}], log, callback);
|
||||
}
|
||||
|
||||
delete(objectGetInfo, reqUids, callback) {
|
||||
|
@ -234,20 +234,20 @@ class AzureClient {
|
|||
}
|
||||
return this._errorWrapper('delete', 'deleteBlobIfExists',
|
||||
[this._azureContainerName, key, options,
|
||||
err => {
|
||||
if (err && err.statusCode === 412) {
|
||||
return callback(errors.PreconditionFailed);
|
||||
}
|
||||
if (err) {
|
||||
const log = createLogger(reqUids);
|
||||
logHelper(log, 'error', 'error deleting object from ' +
|
||||
err => {
|
||||
if (err && err.statusCode === 412) {
|
||||
return callback(errors.PreconditionFailed);
|
||||
}
|
||||
if (err) {
|
||||
const log = createLogger(reqUids);
|
||||
logHelper(log, 'error', 'error deleting object from ' +
|
||||
'Azure datastore', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`Azure: ${err.message}`));
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}
|
||||
|
||||
healthcheck(location, callback, flightCheckOnStartUp) {
|
||||
|
@ -271,7 +271,7 @@ class AzureClient {
|
|||
}
|
||||
|
||||
uploadPart(request, streamingV4Params, partStream, size, key, uploadId,
|
||||
partNumber, bucket, log, callback) {
|
||||
partNumber, bucket, log, callback) {
|
||||
const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
|
||||
const params = { bucketName: this._azureContainerName,
|
||||
partNumber, size, objectKey: azureKey, uploadId };
|
||||
|
@ -299,27 +299,27 @@ class AzureClient {
|
|||
if (size <= azureMpuUtils.maxSubPartSize) {
|
||||
const errorWrapperFn = this._errorWrapper.bind(this);
|
||||
return azureMpuUtils.putSinglePart(errorWrapperFn,
|
||||
stream, params, this._dataStoreName, log, (err, dataStoreETag) => {
|
||||
stream, params, this._dataStoreName, log, (err, dataStoreETag) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
dataRetrievalInfo.dataStoreETag = dataStoreETag;
|
||||
return callback(null, dataRetrievalInfo);
|
||||
});
|
||||
}
|
||||
const errorWrapperFn = this._errorWrapper.bind(this);
|
||||
return azureMpuUtils.putSubParts(errorWrapperFn, stream,
|
||||
params, this._dataStoreName, log, (err, dataStoreETag) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
dataRetrievalInfo.dataStoreETag = dataStoreETag;
|
||||
return callback(null, dataRetrievalInfo);
|
||||
});
|
||||
}
|
||||
const errorWrapperFn = this._errorWrapper.bind(this);
|
||||
return azureMpuUtils.putSubParts(errorWrapperFn, stream,
|
||||
params, this._dataStoreName, log, (err, dataStoreETag) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
dataRetrievalInfo.dataStoreETag = dataStoreETag;
|
||||
return callback(null, dataRetrievalInfo);
|
||||
});
|
||||
}
|
||||
|
||||
completeMPU(jsonList, mdInfo, key, uploadId, bucket, metaHeaders,
|
||||
contentSettings, tagging, log, callback) {
|
||||
contentSettings, tagging, log, callback) {
|
||||
const azureKey = this._createAzureKey(bucket, key, this._bucketMatch);
|
||||
const commitList = {
|
||||
UncommittedBlocks: jsonList.uncommittedBlocks || [],
|
||||
|
@ -345,20 +345,20 @@ class AzureClient {
|
|||
};
|
||||
return this._errorWrapper('completeMPU', 'commitBlocks',
|
||||
[this._azureContainerName, azureKey, commitList, options,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err completing MPU on Azure ' +
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err completing MPU on Azure ' +
|
||||
'datastore', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`Azure: ${err.message}`));
|
||||
}
|
||||
const completeObjData = {
|
||||
key: azureKey,
|
||||
filteredPartsObj,
|
||||
};
|
||||
return callback(null, completeObjData);
|
||||
}], log, callback);
|
||||
}
|
||||
const completeObjData = {
|
||||
key: azureKey,
|
||||
filteredPartsObj,
|
||||
};
|
||||
return callback(null, completeObjData);
|
||||
}], log, callback);
|
||||
}
|
||||
|
||||
objectPutTagging(key, bucket, objectMD, log, callback) {
|
||||
|
@ -367,14 +367,14 @@ class AzureClient {
|
|||
azureMD.tags = JSON.stringify(objectMD.tags);
|
||||
this._errorWrapper('objectPutTagging', 'setBlobMetadata',
|
||||
[this._azureContainerName, azureKey, azureMD,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err putting object tags to ' +
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err putting object tags to ' +
|
||||
'Azure backend', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}
|
||||
|
||||
objectDeleteTagging(key, bucketName, objectMD, log, callback) {
|
||||
|
@ -382,27 +382,27 @@ class AzureClient {
|
|||
const azureMD = this._getMetaHeaders(objectMD);
|
||||
this._errorWrapper('objectDeleteTagging', 'setBlobMetadata',
|
||||
[this._azureContainerName, azureKey, azureMD,
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err putting object tags to ' +
|
||||
err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err putting object tags to ' +
|
||||
'Azure backend', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
return callback(errors.ServiceUnavailable);
|
||||
}
|
||||
return callback();
|
||||
}], log, callback);
|
||||
}
|
||||
|
||||
copyObject(request, destLocationConstraintName, sourceKey,
|
||||
sourceLocationConstraintName, storeMetadataParams, config, log, callback) {
|
||||
sourceLocationConstraintName, storeMetadataParams, config, log, callback) {
|
||||
const destContainerName = request.bucketName;
|
||||
const destObjectKey = request.objectKey;
|
||||
|
||||
const destAzureKey = this._createAzureKey(destContainerName,
|
||||
destObjectKey, this._bucketMatch);
|
||||
destObjectKey, this._bucketMatch);
|
||||
|
||||
const sourceContainerName =
|
||||
config.locationConstraints[sourceLocationConstraintName]
|
||||
.details.azureContainerName;
|
||||
.details.azureContainerName;
|
||||
|
||||
let options;
|
||||
if (storeMetadataParams.metaHeaders) {
|
||||
|
@ -413,7 +413,7 @@ class AzureClient {
|
|||
this._errorWrapper('copyObject', 'startCopyBlob',
|
||||
[`${this._azureStorageEndpoint}` +
|
||||
`${sourceContainerName}/${sourceKey}`,
|
||||
this._azureContainerName, destAzureKey, options,
|
||||
this._azureContainerName, destAzureKey, options,
|
||||
(err, res) => {
|
||||
if (err) {
|
||||
if (err.code === 'CannotVerifyCopySource') {
|
||||
|
@ -421,36 +421,36 @@ class AzureClient {
|
|||
`${sourceContainerName} Azure Container`, err,
|
||||
this._dataStoreName);
|
||||
return callback(errors.AccessDenied
|
||||
.customizeDescription('Error: Unable to access ' +
|
||||
`${sourceContainerName} Azure Container`)
|
||||
.customizeDescription('Error: Unable to access ' +
|
||||
`${sourceContainerName} Azure Container`),
|
||||
);
|
||||
}
|
||||
logHelper(log, 'error', 'error from data backend on ' +
|
||||
'copyObject', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`AWS: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`AWS: ${err.message}`),
|
||||
);
|
||||
}
|
||||
if (res.copy.status === 'pending') {
|
||||
logHelper(log, 'error', 'Azure copy status is pending',
|
||||
err, this._dataStoreName);
|
||||
err, this._dataStoreName);
|
||||
const copyId = res.copy.id;
|
||||
this._client.abortCopyBlob(this._azureContainerName,
|
||||
destAzureKey, copyId, err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'error from data backend ' +
|
||||
destAzureKey, copyId, err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'error from data backend ' +
|
||||
'on abortCopyBlob', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`AWS on abortCopyBlob: ${err.message}`)
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`AWS on abortCopyBlob: ${err.message}`),
|
||||
);
|
||||
}
|
||||
return callback(errors.InvalidObjectState
|
||||
.customizeDescription('Error: Azure copy status was ' +
|
||||
'pending. It has been aborted successfully'),
|
||||
);
|
||||
}
|
||||
return callback(errors.InvalidObjectState
|
||||
.customizeDescription('Error: Azure copy status was ' +
|
||||
'pending. It has been aborted successfully')
|
||||
);
|
||||
});
|
||||
});
|
||||
}
|
||||
return callback(null, destAzureKey);
|
||||
}], log, callback);
|
||||
|
|
|
@ -24,7 +24,7 @@ class MpuHelper {
|
|||
const handleFunc = (fnName, params, retry, callback) => {
|
||||
const timeout = backoff.duration();
|
||||
return setTimeout((params, cb) =>
|
||||
this.service[fnName](params, cb), timeout, params,
|
||||
this.service[fnName](params, cb), timeout, params,
|
||||
(err, res) => {
|
||||
if (err) {
|
||||
if (err.statusCode === 429 || err.code === 429) {
|
||||
|
@ -90,53 +90,53 @@ class MpuHelper {
|
|||
splitMerge(params, partList, level, callback) {
|
||||
// create composition of slices from the partList array
|
||||
return async.mapLimit(eachSlice.call(partList, 32),
|
||||
this.service._maxConcurrent,
|
||||
(infoParts, cb) => {
|
||||
const mpuPartList = infoParts.Parts.map(item =>
|
||||
({ PartName: item.PartName }));
|
||||
const partNumber = infoParts.PartNumber;
|
||||
const tmpKey =
|
||||
this.service._maxConcurrent,
|
||||
(infoParts, cb) => {
|
||||
const mpuPartList = infoParts.Parts.map(item =>
|
||||
({ PartName: item.PartName }));
|
||||
const partNumber = infoParts.PartNumber;
|
||||
const tmpKey =
|
||||
createMpuKey(params.Key, params.UploadId, partNumber, level);
|
||||
const mergedObject = { PartName: tmpKey };
|
||||
if (mpuPartList.length < 2) {
|
||||
logger.trace(
|
||||
'splitMerge: parts are fewer than 2, copy instead');
|
||||
// else just perform a copy
|
||||
const copyParams = {
|
||||
const mergedObject = { PartName: tmpKey };
|
||||
if (mpuPartList.length < 2) {
|
||||
logger.trace(
|
||||
'splitMerge: parts are fewer than 2, copy instead');
|
||||
// else just perform a copy
|
||||
const copyParams = {
|
||||
Bucket: params.MPU,
|
||||
Key: tmpKey,
|
||||
CopySource: `${params.MPU}/${mpuPartList[0].PartName}`,
|
||||
};
|
||||
return this.service.copyObject(copyParams, (err, res) => {
|
||||
if (err) {
|
||||
logHelper(logger, 'error',
|
||||
'error in splitMerge - copyObject', err);
|
||||
return cb(err);
|
||||
}
|
||||
mergedObject.VersionId = res.VersionId;
|
||||
mergedObject.ETag = res.ETag;
|
||||
return cb(null, mergedObject);
|
||||
});
|
||||
}
|
||||
const composeParams = {
|
||||
Bucket: params.MPU,
|
||||
Key: tmpKey,
|
||||
CopySource: `${params.MPU}/${mpuPartList[0].PartName}`,
|
||||
MultipartUpload: { Parts: mpuPartList },
|
||||
};
|
||||
return this.service.copyObject(copyParams, (err, res) => {
|
||||
return this.retryCompose(composeParams, (err, res) => {
|
||||
if (err) {
|
||||
logHelper(logger, 'error',
|
||||
'error in splitMerge - copyObject', err);
|
||||
return cb(err);
|
||||
}
|
||||
mergedObject.VersionId = res.VersionId;
|
||||
mergedObject.ETag = res.ETag;
|
||||
return cb(null, mergedObject);
|
||||
});
|
||||
}
|
||||
const composeParams = {
|
||||
Bucket: params.MPU,
|
||||
Key: tmpKey,
|
||||
MultipartUpload: { Parts: mpuPartList },
|
||||
};
|
||||
return this.retryCompose(composeParams, (err, res) => {
|
||||
}, (err, res) => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
return callback(err);
|
||||
}
|
||||
mergedObject.VersionId = res.VersionId;
|
||||
mergedObject.ETag = res.ETag;
|
||||
return cb(null, mergedObject);
|
||||
return callback(null, res.length);
|
||||
});
|
||||
}, (err, res) => {
|
||||
if (err) {
|
||||
return callback(err);
|
||||
}
|
||||
return callback(null, res.length);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -284,7 +284,7 @@ class MpuHelper {
|
|||
if (err) {
|
||||
logHelper(logger, 'error', 'error in ' +
|
||||
'createMultipartUpload - final copyObject',
|
||||
err);
|
||||
err);
|
||||
return next(err);
|
||||
}
|
||||
const mpuResult = {
|
||||
|
@ -301,7 +301,7 @@ class MpuHelper {
|
|||
if (err) {
|
||||
logHelper(logger, 'error', 'error in ' +
|
||||
'createMultipartUpload - final head object',
|
||||
err);
|
||||
err);
|
||||
return next(err);
|
||||
}
|
||||
mpuResult.ContentLength = res.ContentLength;
|
||||
|
|
|
@ -70,7 +70,7 @@ class GcpManagedUpload {
|
|||
if (this.body instanceof stream) {
|
||||
assert.strictEqual(typeof this.totalBytes, 'number',
|
||||
errors.MissingContentLength.customizeDescription(
|
||||
'If body is a stream, ContentLength must be provided'));
|
||||
'If body is a stream, ContentLength must be provided'));
|
||||
} else {
|
||||
if (typeof this.body === 'string') {
|
||||
this.body = Buffer.from(this.body);
|
||||
|
@ -156,13 +156,13 @@ class GcpManagedUpload {
|
|||
.map(item =>
|
||||
Object.assign(item, { ETag: this.parts[item.PartNumber] }));
|
||||
return this.service.completeMultipartUpload(params,
|
||||
(err, res) => {
|
||||
if (err) {
|
||||
return this.cleanUp(err);
|
||||
}
|
||||
this.completed = true;
|
||||
return this.callback(null, res);
|
||||
});
|
||||
(err, res) => {
|
||||
if (err) {
|
||||
return this.cleanUp(err);
|
||||
}
|
||||
this.completed = true;
|
||||
return this.callback(null, res);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -187,16 +187,16 @@ class GcpManagedUpload {
|
|||
if (this.body instanceof stream) {
|
||||
// stream type
|
||||
this.body.on('error', err => this.cleanUp(err))
|
||||
.on('readable', () => this.chunkStream())
|
||||
.on('end', () => {
|
||||
this.isDoneChunking = true;
|
||||
this.chunkStream();
|
||||
.on('readable', () => this.chunkStream())
|
||||
.on('end', () => {
|
||||
this.isDoneChunking = true;
|
||||
this.chunkStream();
|
||||
|
||||
if (this.isDoneChunking && this.uploadedParts >= 1 &&
|
||||
if (this.isDoneChunking && this.uploadedParts >= 1 &&
|
||||
this.uploadedParts === this.totalParts) {
|
||||
this.completeUpload();
|
||||
}
|
||||
});
|
||||
this.completeUpload();
|
||||
}
|
||||
});
|
||||
}
|
||||
return undefined;
|
||||
}
|
||||
|
@ -251,22 +251,22 @@ class GcpManagedUpload {
|
|||
});
|
||||
},
|
||||
next => async.eachLimit(this.slicedParts, this.queueSize,
|
||||
(uploadPart, done) => {
|
||||
const params = {
|
||||
Bucket: this.mpuBucket,
|
||||
Key: this.params.Key,
|
||||
UploadId: this.uploadId,
|
||||
Body: uploadPart.Body,
|
||||
PartNumber: uploadPart.PartNumber,
|
||||
};
|
||||
this.service.uploadPart(params, (err, res) => {
|
||||
if (!err) {
|
||||
this.parts[uploadPart.PartNumber] = res.ETag;
|
||||
this.uploadedParts++;
|
||||
}
|
||||
return done(err);
|
||||
});
|
||||
}, next),
|
||||
(uploadPart, done) => {
|
||||
const params = {
|
||||
Bucket: this.mpuBucket,
|
||||
Key: this.params.Key,
|
||||
UploadId: this.uploadId,
|
||||
Body: uploadPart.Body,
|
||||
PartNumber: uploadPart.PartNumber,
|
||||
};
|
||||
this.service.uploadPart(params, (err, res) => {
|
||||
if (!err) {
|
||||
this.parts[uploadPart.PartNumber] = res.ETag;
|
||||
this.uploadedParts++;
|
||||
}
|
||||
return done(err);
|
||||
});
|
||||
}, next),
|
||||
], err => {
|
||||
if (err) {
|
||||
return this.cleanUp(new Error(
|
||||
|
|
|
@ -239,7 +239,7 @@ module.exports = {
const requestId = resp.httpResponse.headers ?
resp.httpResponse.headers['x-guploader-uploadid'] : null;
if (resp.error) {
// eslint-disable-next-line no-param-reassign
// eslint-disable-next-line no-param-reassign
resp.error.requestId = resp.requestId || requestId;
}
},
@ -120,10 +120,10 @@ class GcpClient extends AwsClient {
|
|||
return this._client.createMultipartUpload(params, (err, mpuResObj) => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from data backend',
|
||||
err, this._dataStoreName, this.clientType);
|
||||
err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`GCP: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`GCP: ${err.message}`),
|
||||
);
|
||||
}
|
||||
return callback(null, mpuResObj);
|
||||
|
@ -162,32 +162,32 @@ class GcpClient extends AwsClient {
|
|||
};
|
||||
const completeObjData = { key: gcpKey };
|
||||
return this._client.completeMultipartUpload(mpuParams,
|
||||
(err, completeMpuRes) => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from data backend on ' +
|
||||
(err, completeMpuRes) => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from data backend on ' +
|
||||
'completeMPU', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`GCP: ${err.message}`)
|
||||
);
|
||||
}
|
||||
if (!completeMpuRes.VersionId) {
|
||||
logHelper(log, 'error', 'missing version id for data ' +
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`GCP: ${err.message}`),
|
||||
);
|
||||
}
|
||||
if (!completeMpuRes.VersionId) {
|
||||
logHelper(log, 'error', 'missing version id for data ' +
|
||||
'backend object', missingVerIdInternalError,
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(missingVerIdInternalError);
|
||||
}
|
||||
// remove quotes from eTag because they're added later
|
||||
completeObjData.eTag = removeQuotes(completeMpuRes.ETag);
|
||||
completeObjData.dataStoreVersionId = completeMpuRes.VersionId;
|
||||
completeObjData.contentLength =
|
||||
return callback(missingVerIdInternalError);
|
||||
}
|
||||
// remove quotes from eTag because they're added later
|
||||
completeObjData.eTag = removeQuotes(completeMpuRes.ETag);
|
||||
completeObjData.dataStoreVersionId = completeMpuRes.VersionId;
|
||||
completeObjData.contentLength =
|
||||
Number.parseInt(completeMpuRes.ContentLength, 10);
|
||||
return callback(null, completeObjData);
|
||||
});
|
||||
return callback(null, completeObjData);
|
||||
});
|
||||
}
|
||||
|
||||
uploadPart(request, streamingV4Params, stream, size, key, uploadId,
|
||||
partNumber, bucketName, log, callback) {
|
||||
partNumber, bucketName, log, callback) {
|
||||
let hashedStream = stream;
|
||||
if (request) {
|
||||
const partStream = prepareStream(request, streamingV4Params,
|
||||
|
@ -209,8 +209,8 @@ class GcpClient extends AwsClient {
|
|||
logHelper(log, 'error', 'err from data backend ' +
|
||||
'on uploadPart', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`GCP: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`GCP: ${err.message}`),
|
||||
);
|
||||
}
|
||||
// remove quotes from eTag because they're added later
|
||||
|
@ -226,11 +226,11 @@ class GcpClient extends AwsClient {
|
|||
}
|
||||
|
||||
uploadPartCopy(request, gcpSourceKey, sourceLocationConstraintName, config,
|
||||
log, callback) {
|
||||
log, callback) {
|
||||
const destBucketName = request.bucketName;
|
||||
const destObjectKey = request.objectKey;
|
||||
const destGcpKey = this._createGcpKey(destBucketName, destObjectKey,
|
||||
this._bucketMatch);
|
||||
this._bucketMatch);
|
||||
|
||||
const sourceGcpBucketName =
|
||||
config.getGcpBucketNames(sourceLocationConstraintName).bucketName;
|
||||
|
@ -241,8 +241,8 @@ class GcpClient extends AwsClient {
|
|||
|
||||
if (copySourceRange) {
|
||||
return callback(errors.NotImplemented
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.clientType}: copySourceRange not implemented`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`${this.clientType}: copySourceRange not implemented`),
|
||||
);
|
||||
}
|
||||
|
||||
|
@ -260,15 +260,15 @@ class GcpClient extends AwsClient {
|
|||
`${sourceGcpBucketName} GCP bucket`, err,
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.AccessDenied
|
||||
.customizeDescription('Error: Unable to access ' +
|
||||
`${sourceGcpBucketName} GCP bucket`)
|
||||
.customizeDescription('Error: Unable to access ' +
|
||||
`${sourceGcpBucketName} GCP bucket`),
|
||||
);
|
||||
}
|
||||
logHelper(log, 'error', 'error from data backend on ' +
|
||||
'uploadPartCopy', err, this._dataStoreName);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`GCP: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`GCP: ${err.message}`),
|
||||
);
|
||||
}
|
||||
// remove quotes from eTag because they're added later
|
||||
|
@ -290,8 +290,8 @@ class GcpClient extends AwsClient {
|
|||
logHelper(log, 'error', 'err from data backend ' +
|
||||
'on abortMPU', err, this._dataStoreName, this.clientType);
|
||||
return callback(errors.ServiceUnavailable
|
||||
.customizeDescription('Error returned from ' +
|
||||
`GCP: ${err.message}`)
|
||||
.customizeDescription('Error returned from ' +
|
||||
`GCP: ${err.message}`),
|
||||
);
|
||||
}
|
||||
return callback();
|
||||
|
|
|
@ -45,7 +45,7 @@ class PfsClient {
|
|||
}
|
||||
return callback(null, keyContext.objectKey, '',
|
||||
keyContext.metaHeaders['x-amz-meta-size'],
|
||||
md5
|
||||
md5,
|
||||
);
|
||||
}
|
||||
logHelper(log, 'error', 'Not implemented', errors.NotImplemented,
|
||||
|
@ -72,7 +72,7 @@ class PfsClient {
|
|||
this._restClient.delete(key, reqUids, err => {
|
||||
if (err) {
|
||||
logHelper(log, 'error', 'err from data backend', err,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(err);
|
||||
}
|
||||
return callback();
|
||||
|
@ -86,61 +86,61 @@ class PfsClient {
|
|||
}
|
||||
|
||||
createMPU(key, metaHeaders, bucketName, websiteRedirectHeader, contentType,
|
||||
cacheControl, contentDisposition, contentEncoding, log, callback) {
|
||||
cacheControl, contentDisposition, contentEncoding, log, callback) {
|
||||
logHelper(log, 'error', 'Not implemented', errors.NotImplemented,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.NotImplemented);
|
||||
}
|
||||
|
||||
uploadPart(request, streamingV4Params, stream, size, key, uploadId,
|
||||
partNumber, bucketName, log, callback) {
|
||||
partNumber, bucketName, log, callback) {
|
||||
logHelper(log, 'error', 'Not implemented', errors.NotImplemented,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.NotImplemented);
|
||||
}
|
||||
|
||||
listParts(key, uploadId, bucketName, partNumberMarker, maxParts, log,
|
||||
callback) {
|
||||
callback) {
|
||||
logHelper(log, 'error', 'Not implemented', errors.NotImplemented,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.NotImplemented);
|
||||
}
|
||||
|
||||
completeMPU(jsonList, mdInfo, key, uploadId, bucketName, log, callback) {
|
||||
logHelper(log, 'error', 'Not implemented', errors.NotImplemented,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.NotImplemented);
|
||||
}
|
||||
|
||||
abortMPU(key, uploadId, bucketName, log, callback) {
|
||||
logHelper(log, 'error', 'Not implemented', errors.NotImplemented,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.NotImplemented);
|
||||
}
|
||||
|
||||
objectPutTagging(key, bucket, objectMD, log, callback) {
|
||||
logHelper(log, 'error', 'Not implemented', errors.NotImplemented,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.NotImplemented);
|
||||
}
|
||||
|
||||
objectDeleteTagging(key, bucketName, objectMD, log, callback) {
|
||||
logHelper(log, 'error', 'Not implemented', errors.NotImplemented,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.NotImplemented);
|
||||
}
|
||||
|
||||
copyObject(request, destLocationConstraintName, sourceKey,
|
||||
sourceLocationConstraintName, storeMetadataParams, config, log, callback) {
|
||||
sourceLocationConstraintName, storeMetadataParams, config, log, callback) {
|
||||
logHelper(log, 'error', 'Not implemented', errors.NotImplemented,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.NotImplemented);
|
||||
}
|
||||
|
||||
uploadPartCopy(request, awsSourceKey, sourceLocationConstraintName,
|
||||
config, log, callback) {
|
||||
config, log, callback) {
|
||||
logHelper(log, 'error', 'Not implemented', errors.NotImplemented,
|
||||
this._dataStoreName, this.clientType);
|
||||
this._dataStoreName, this.clientType);
|
||||
return callback(errors.NotImplemented);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -94,7 +94,7 @@ const utils = {
* same account since Azure copy outside of an account is async
*/
externalBackendCopy(config, locationConstraintSrc, locationConstraintDest,
sourceBucketMD, destBucketMD) {
sourceBucketMD, destBucketMD) {
const sourceBucketName = sourceBucketMD.getName();
const destBucketName = destBucketMD.getName();
const isSameBucket = sourceBucketName === destBucketName;

@ -111,11 +111,11 @@ const utils = {
sourceLocationConstraintType === 'gcp' ||
(sourceLocationConstraintType === 'azure' &&
config.isSameAzureAccount(locationConstraintSrc,
locationConstraintDest)));
locationConstraintDest)));
},

checkExternalBackend(clients, locations, type, flightCheckOnStartUp,
externalBackendHealthCheckInterval, cb) {
externalBackendHealthCheckInterval, cb) {
const checkStatus = backendHealth[type] || {};
if (locations.length === 0) {
return process.nextTick(cb, null, []);
|
||||
|
|
|
@ -35,7 +35,6 @@ const FOLDER_HASH = 3511;
|
|||
* directory hash structure under the configured dataPath.
|
||||
*/
|
||||
class DataFileStore {
|
||||
|
||||
/**
|
||||
* @constructor
|
||||
* @param {Object} dataConfig - configuration of the file backend
|
||||
|
@ -78,7 +77,7 @@ class DataFileStore {
|
|||
fs.access(this.dataPath, fs.F_OK | fs.R_OK | fs.W_OK, err => {
|
||||
if (err) {
|
||||
this.logger.error('Data path is not readable or writable',
|
||||
{ error: err });
|
||||
{ error: err });
|
||||
return callback(err);
|
||||
}
|
||||
if (this.isPassthrough) {
|
||||
|
@ -86,7 +85,7 @@ class DataFileStore {
|
|||
}
|
||||
// Create FOLDER_HASH subdirectories
|
||||
const subDirs = Array.from({ length: FOLDER_HASH },
|
||||
(v, k) => (k).toString());
|
||||
(v, k) => (k).toString());
|
||||
this.logger.info(`pre-creating ${subDirs.length} subdirs...`);
|
||||
if (!this.noSync) {
|
||||
storageUtils.setDirSyncFlag(this.dataPath, this.logger);
|
||||
|
@ -103,7 +102,7 @@ class DataFileStore {
|
|||
err => {
|
||||
if (err) {
|
||||
this.logger.error('Error creating subdirs',
|
||||
{ error: err });
|
||||
{ error: err });
|
||||
return callback(err);
|
||||
}
|
||||
this.logger.info('data file store init complete, ' +
|
||||
|
@ -167,7 +166,7 @@ class DataFileStore {
|
|||
fs.open(filePath, 'wx', (err, fd) => {
|
||||
if (err) {
|
||||
log.error('error opening filePath',
|
||||
{ method: 'put', key, filePath, error: err });
|
||||
{ method: 'put', key, filePath, error: err });
|
||||
return callback(errors.InternalError.customizeDescription(
|
||||
`filesystem error: open() returned ${err.code}`));
|
||||
}
|
||||
|
@ -181,7 +180,7 @@ class DataFileStore {
|
|||
fileStream.on('finish', () => {
|
||||
function ok() {
|
||||
log.debug('finished writing data',
|
||||
{ method: 'put', key, filePath });
|
||||
{ method: 'put', key, filePath });
|
||||
return cbOnce(null, key);
|
||||
}
|
||||
if (this.noSync) {
|
||||
|
@ -243,7 +242,7 @@ class DataFileStore {
|
|||
return undefined;
|
||||
}).on('error', err => {
|
||||
log.error('error streaming data on write',
|
||||
{ method: 'put', key, filePath, error: err });
|
||||
{ method: 'put', key, filePath, error: err });
|
||||
// destroying the write stream forces a close(fd)
|
||||
fileStream.destroy();
|
||||
return cbOnce(errors.InternalError.customizeDescription(
|
||||
|
@ -262,7 +261,7 @@ class DataFileStore {
|
|||
dataStream.on('close', () => {
|
||||
// this means the underlying socket has been closed
|
||||
log.debug('Client closed socket while streaming',
|
||||
{ method: 'put', key, filePath });
|
||||
{ method: 'put', key, filePath });
|
||||
// destroying the write stream forces a close(fd)
|
||||
fileStream.destroy();
|
||||
// we need to unlink the file ourselves
|
||||
|
@ -294,7 +293,7 @@ class DataFileStore {
|
|||
return callback(errors.ObjNotFound);
|
||||
}
|
||||
log.error('error on \'stat\' of file',
|
||||
{ key, filePath, error: err });
|
||||
{ key, filePath, error: err });
|
||||
return callback(errors.InternalError.customizeDescription(
|
||||
`filesystem error: stat() returned ${err.code}`));
|
||||
}
|
||||
|
@ -330,34 +329,34 @@ class DataFileStore {
|
|||
readStreamOptions.end = byteRange[1];
|
||||
}
|
||||
log.debug('opening readStream to get data',
|
||||
{ method: 'get', key, filePath, byteRange });
|
||||
{ method: 'get', key, filePath, byteRange });
|
||||
const cbOnce = jsutil.once(callback);
|
||||
const rs = fs.createReadStream(filePath, readStreamOptions)
|
||||
.on('error', err => {
|
||||
if (err.code === 'ENOENT') {
|
||||
return cbOnce(errors.ObjNotFound);
|
||||
.on('error', err => {
|
||||
if (err.code === 'ENOENT') {
|
||||
return cbOnce(errors.ObjNotFound);
|
||||
}
|
||||
log.error('error retrieving file',
|
||||
{ method: 'DataFileStore.get', key, filePath,
|
||||
error: err });
|
||||
return cbOnce(
|
||||
errors.InternalError.customizeDescription(
|
||||
`filesystem read error: ${err.code}`));
|
||||
})
|
||||
.on('open', () => { cbOnce(null, rs); })
|
||||
.on('end', () => {
|
||||
if (this.noCache) {
|
||||
releasePageCacheSync(filePath, rs.fd, log);
|
||||
}
|
||||
fs.close(rs.fd, err => {
|
||||
if (err) {
|
||||
log.error('unable to close file descriptor', {
|
||||
method: 'DataFileStore.get', key, filePath,
|
||||
error: err,
|
||||
});
|
||||
}
|
||||
log.error('error retrieving file',
|
||||
{ method: 'DataFileStore.get', key, filePath,
|
||||
error: err });
|
||||
return cbOnce(
|
||||
errors.InternalError.customizeDescription(
|
||||
`filesystem read error: ${err.code}`));
|
||||
})
|
||||
.on('open', () => { cbOnce(null, rs); })
|
||||
.on('end', () => {
|
||||
if (this.noCache) {
|
||||
releasePageCacheSync(filePath, rs.fd, log);
|
||||
}
|
||||
fs.close(rs.fd, err => {
|
||||
if (err) {
|
||||
log.error('unable to close file descriptor', {
|
||||
method: 'DataFileStore.get', key, filePath,
|
||||
error: err,
|
||||
});
|
||||
}
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -12,7 +12,7 @@ function releasePageCacheSync(filePath, fd, log) {
const ret = posixFadvise(fd, 0, 0, 4);
if (ret !== 0) {
log.warning(
`error fadv_dontneed ${filePath} returned ${ret}`);
`error fadv_dontneed ${filePath} returned ${ret}`);
}
}
|
||||
|
||||
|
|
|
@ -37,16 +37,16 @@ const backend = {
}
cursor += data.length;
})
.on('end', () => {
if (exceeded) {
log.error('data stream exceed announced size',
{ size, overflow: cursor });
callback(errors.InternalError);
} else {
ds[count] = { value, keyContext };
callback(null, count++);
}
});
.on('end', () => {
if (exceeded) {
log.error('data stream exceed announced size',
{ size, overflow: cursor });
callback(errors.InternalError);
} else {
ds[count] = { value, keyContext };
callback(null, count++);
}
});
},

get: function getMem(objectGetInfo, range, reqUids, callback) {
|
||||
|
|
|
@ -181,25 +181,25 @@ class MetadataWrapper {
|
|||
const value = typeof objVal.getValue === 'function' ?
|
||||
objVal.getValue() : objVal;
|
||||
this.client.putObject(bucketName, objName, value, params, log,
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
log.debug('error from metadata', { implName: this.implName,
|
||||
error: err });
|
||||
return cb(err);
|
||||
}
|
||||
if (data) {
|
||||
log.debug('object version successfully put in metadata',
|
||||
{ version: data });
|
||||
} else {
|
||||
log.debug('object successfully put in metadata');
|
||||
}
|
||||
return cb(err, data);
|
||||
});
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
log.debug('error from metadata', { implName: this.implName,
|
||||
error: err });
|
||||
return cb(err);
|
||||
}
|
||||
if (data) {
|
||||
log.debug('object version successfully put in metadata',
|
||||
{ version: data });
|
||||
} else {
|
||||
log.debug('object successfully put in metadata');
|
||||
}
|
||||
return cb(err, data);
|
||||
});
|
||||
}
|
||||
|
||||
getBucketAndObjectMD(bucketName, objName, params, log, cb) {
|
||||
log.debug('getting bucket and object from metadata',
|
||||
{ database: bucketName, object: objName });
|
||||
{ database: bucketName, object: objName });
|
||||
this.client.getBucketAndObject(bucketName, objName, params, log,
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
|
@ -208,7 +208,7 @@ class MetadataWrapper {
|
|||
return cb(err);
|
||||
}
|
||||
log.debug('bucket and object retrieved from metadata',
|
||||
{ database: bucketName, object: objName });
|
||||
{ database: bucketName, object: objName });
|
||||
return cb(err, data);
|
||||
});
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@ const BucketInfo = require('../../../models/BucketInfo');
|
|||
class BucketClientInterface {
|
||||
constructor(params, bucketclient, logger) {
|
||||
assert(params.bucketdBootstrap.length > 0,
|
||||
'bucketd bootstrap list is empty');
|
||||
'bucketd bootstrap list is empty');
|
||||
const bootstrap = params.bucketdBootstrap;
|
||||
const log = params.bucketdLog;
|
||||
if (params.https) {
|
||||
|
@ -29,7 +29,7 @@ class BucketClientInterface {
|
|||
|
||||
createBucket(bucketName, bucketMD, log, cb) {
|
||||
this.client.createBucket(bucketName, log.getSerializedUids(),
|
||||
bucketMD.serialize(), cb);
|
||||
bucketMD.serialize(), cb);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@ -57,17 +57,17 @@ class BucketClientInterface {
|
|||
|
||||
getRaftBuckets(raftId, log, cb) {
|
||||
return this.client.getRaftBuckets(raftId, log.getSerializedUids(),
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
return cb(null, JSON.parse(data));
|
||||
});
|
||||
(err, data) => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
return cb(null, JSON.parse(data));
|
||||
});
|
||||
}
|
||||
|
||||
putBucketAttributes(bucketName, bucketMD, log, cb) {
|
||||
this.client.putBucketAttributes(bucketName, log.getSerializedUids(),
|
||||
bucketMD.serialize(), cb);
|
||||
bucketMD.serialize(), cb);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@ -95,7 +95,7 @@ class BucketClientInterface {
|
|||
|
||||
deleteObject(bucketName, objName, params, log, cb) {
|
||||
this.client.deleteObject(bucketName, objName, log.getSerializedUids(),
|
||||
cb, params);
|
||||
cb, params);
|
||||
return null;
|
||||
}
|
||||
|
||||
|
@ -183,8 +183,8 @@ class BucketClientInterface {
|
|||
reason.msg = undefined;
|
||||
respBody[implName] = {
|
||||
code: 200,
|
||||
message, // Provide interpreted reason msg
|
||||
body: reason, // Provide analysis data
|
||||
message, // Provide interpreted reason msg
|
||||
body: reason, // Provide analysis data
|
||||
};
|
||||
if (failure) {
|
||||
// Setting the `error` field is how the healthCheck
|
||||
|
|
|
@ -30,7 +30,6 @@ class ListRecordStream extends stream.Transform {
* @classdesc Proxy object to access raft log API
*/
class LogConsumer {

/**
* @constructor
*

@ -97,14 +96,14 @@ class LogConsumer {
if (err.code === 404) {
// no such raft session, log and ignore
this.logger.warn('raft session does not exist yet',
{ raftId: this.raftSession });
{ raftId: this.raftSession });
return cbOnce(null, { info: { start: null,
end: null } });
}
if (err.code === 416) {
// requested range not satisfiable
this.logger.debug('no new log record to process',
{ raftId: this.raftSession });
{ raftId: this.raftSession });
return cbOnce(null, { info: { start: null,
end: null } });
}

@ -116,7 +115,7 @@ class LogConsumer {
// is emitted
recordStream.on('error', err => {
this.logger.error('error receiving raft log',
{ error: err.message });
{ error: err.message });
return cbOnce(errors.InternalError);
});
const jsonResponse = stream.pipe(jsonStream.parse('log.*'));

@ -127,7 +126,7 @@ class LogConsumer {
// remove temporary listener
recordStream.removeAllListeners('error');
return cbOnce(null, { info: header.info,
log: recordStream });
log: recordStream });
})
.on('error', err => recordStream.emit('error', err));
return undefined;
|
||||
|
|
|
@ -14,13 +14,13 @@ const _operatorType1 = joi.string().valid(
'$gt',
'$gte',
'$lt',
'$lte'
'$lte',
);

// supports strings, numbers, and boolean
const _operatorType2 = joi.string().valid(
'$eq',
'$ne'
'$ne',
);

const _valueType1 = joi.alternatives([
|
||||
|
|
|
@ -10,14 +10,13 @@ const list = require('../../../algos/list/exportAlgos');
|
|||
const MetadataFileClient = require('./MetadataFileClient');
|
||||
const versionSep =
|
||||
require('../../../versioning/constants')
|
||||
.VersioningConstants.VersionId.Separator;
|
||||
.VersioningConstants.VersionId.Separator;
|
||||
|
||||
const METASTORE = '__metastore';
|
||||
|
||||
const itemScanRefreshDelay = 1000 * 30 * 60; // 30 minutes
|
||||
|
||||
class BucketFileInterface {
|
||||
|
||||
/**
|
||||
* @constructor
|
||||
* @param {object} [params] - constructor params
|
||||
|
@ -65,7 +64,7 @@ class BucketFileInterface {
|
|||
if (err) {
|
||||
this.logger.fatal('error writing usersBucket ' +
|
||||
'attributes to metadata',
|
||||
{ error: err });
|
||||
{ error: err });
|
||||
throw (errors.InternalError);
|
||||
}
|
||||
return done();
|
||||
|
@ -104,8 +103,8 @@ class BucketFileInterface {
|
|||
}
|
||||
this.lastItemScanTime = null;
|
||||
this.putBucketAttributes(bucketName,
|
||||
bucketMD,
|
||||
log, cb);
|
||||
bucketMD,
|
||||
log, cb);
|
||||
return undefined;
|
||||
});
|
||||
}
|
||||
|
@ -191,7 +190,7 @@ class BucketFileInterface {
|
|||
errorStack: err.stack,
|
||||
};
|
||||
log.error('error deleting bucket',
|
||||
logObj);
|
||||
logObj);
|
||||
return cb(errors.InternalError);
|
||||
}
|
||||
this.lastItemScanTime = null;
|
||||
|
@ -392,16 +391,16 @@ class BucketFileInterface {
|
|||
cbDone = true;
|
||||
async.eachSeries(res.bucketList, (bucket, cb) => {
|
||||
this.getBucketAttributes(bucket.name, log,
|
||||
(err, bucketInfo) => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
/* eslint-disable no-param-reassign */
|
||||
bucket.location =
|
||||
(err, bucketInfo) => {
|
||||
if (err) {
|
||||
return cb(err);
|
||||
}
|
||||
/* eslint-disable no-param-reassign */
|
||||
bucket.location =
|
||||
bucketInfo.getLocationConstraint();
|
||||
/* eslint-enable no-param-reassign */
|
||||
return cb();
|
||||
});
|
||||
/* eslint-enable no-param-reassign */
|
||||
return cb();
|
||||
});
|
||||
}, err => {
|
||||
if (!err) {
|
||||
this.lastItemScanTime = Date.now();
|
||||
|
|
|
@ -8,7 +8,6 @@ const { RecordLogProxy } = require('./RecordLog.js');
const werelogs = require('werelogs');

class MetadataFileClient {

/**
* Construct a metadata client
*

@ -86,7 +85,7 @@ class MetadataFileClient {
logProxy.connect(err => {
if (err) {
this.logger.error('error connecting to record log service',
{ url, error: err.stack });
{ url, error: err.stack });
return done(err);
}
this.logger.info('connected to record log service', { url });
Some files were not shown because too many files have changed in this diff.
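The change repeated throughout the hunks above is stylistic: continuation lines inside multi-line calls are re-indented, and a trailing comma is added after the last argument of a multi-line call (for example, `${this.type}: ${err.message}`) becomes `${this.type}: ${err.message}`),). The sketch below is illustrative only and is not part of the changeset; the `errors` object here is a hypothetical stand-in for the library's real error helpers.

// Illustrative sketch only -- not part of the diff. 'errors' is a
// hypothetical stand-in for the real error helper objects.
const errors = {
    ServiceUnavailable: {
        customizeDescription(msg) {
            return { code: 503, description: msg };
        },
    },
};

// Style after the change: the last argument of a multi-line call ends with
// a trailing comma and the closing parenthesis sits on its own line, so
// adding or reordering arguments later only touches the lines involved.
function reportBackendError(type, err, callback) {
    return callback(errors.ServiceUnavailable
        .customizeDescription('Error returned from ' +
            `${type}: ${err.message}`),
    );
}

// Usage sketch:
reportBackendError('AWS', new Error('connect timeout'),
    res => console.log(res.code, res.description),
);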