Compare commits
44 Commits
developmen...ft/ZENKO-1
Author | SHA1 | Date |
---|---|---|
Salim | 0916b0daf9 | |
Alexander Chan | 6081d974aa | |
Alexander Chan | 4528bbad42 | |
Dora Korpar | 68dc315eb7 | |
Rached Ben Mustapha | c4e01ef024 | |
LaureVergeron | 9b8c10d288 | |
vrancurel | bb543b5767 | |
vrancurel | 0a67309294 | |
Lauren Spiegel | ae9dfab115 | |
Lauren Spiegel | 16e8e841bc | |
Lauren Spiegel | 1ef6f24adb | |
Lauren Spiegel | 4e3a23d88c | |
Lauren Spiegel | f62d6183c9 | |
Lauren Spiegel | 0c845a907d | |
JianqinWang | c8c564ccdf | |
Rached Ben Mustapha | 8e71d96b70 | |
Rached Ben Mustapha | e95f5a3997 | |
Rached Ben Mustapha | 496bf74757 | |
Rached Ben Mustapha | 97fc8dd8b4 | |
Rached Ben Mustapha | 66407a3ead | |
Rached Ben Mustapha | 35b2bde801 | |
Rached Ben Mustapha | b1c9059c68 | |
Rached Ben Mustapha | 6ebe4540d9 | |
Rached Ben Mustapha | 3eeb932539 | |
Rached Ben Mustapha | 4fa1c84e6a | |
Rached Ben Mustapha | 55792107f1 | |
Rached Ben Mustapha | 3c38b38a41 | |
Rached Ben Mustapha | d9b99e3f45 | |
Rached Ben Mustapha | 869d5fba14 | |
Rached Ben Mustapha | 6d540e0818 | |
Rached Ben Mustapha | b78ae39179 | |
Rached Ben Mustapha | de7968f4b5 | |
Rached Ben Mustapha | 8f828ec8ca | |
Rached Ben Mustapha | 80beec6dbc | |
Rached Ben Mustapha | 88c434ec21 | |
Rached Ben Mustapha | d31ce80931 | |
Rached Ben Mustapha | ae982b0b94 | |
Rached Ben Mustapha | 1d713404c4 | |
Rached Ben Mustapha | c0ba0eb6fc | |
Rached Ben Mustapha | fb382a03d3 | |
Rached Ben Mustapha | 9cae60af7e | |
Rached Ben Mustapha | bfed27dd65 | |
Rached Ben Mustapha | f78625e2f1 | |
Rached Ben Mustapha | ec9fa9b0f3 | |
@@ -0,0 +1,3 @@
+node_modules
+localData/*
+localMetadata/*
@@ -20,4 +20,4 @@ VOLUME ["/usr/src/app/localData","/usr/src/app/localMetadata"]
 ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
 CMD [ "npm", "start" ]

-EXPOSE 8000
+EXPOSE 8001
@@ -19,4 +19,4 @@ ENV S3BACKEND mem
 ENTRYPOINT ["/usr/src/app/docker-entrypoint.sh"]
 CMD [ "npm", "start" ]

-EXPOSE 8000
+EXPOSE 8001
@@ -0,0 +1,100 @@
+#!/bin/sh
+// 2>/dev/null ; exec "$(which nodejs 2>/dev/null || which node)" "$0" "$@"
+'use strict'; // eslint-disable-line strict
+
+const { auth } = require('arsenal');
+const commander = require('commander');
+
+const http = require('http');
+const https = require('https');
+const logger = require('../lib/utilities/logger');
+
+function _performSearch(host,
+    port,
+    bucketName,
+    query,
+    accessKey,
+    secretKey,
+    verbose, ssl) {
+    const escapedSearch = encodeURIComponent(query);
+    const options = {
+        host,
+        port,
+        method: 'GET',
+        path: `/${bucketName}/?search=${escapedSearch}`,
+        headers: {
+            'Content-Length': 0,
+        },
+        rejectUnauthorized: false,
+    };
+    const transport = ssl ? https : http;
+    const request = transport.request(options, response => {
+        if (verbose) {
+            logger.info('response status code', {
+                statusCode: response.statusCode,
+            });
+            logger.info('response headers', { headers: response.headers });
+        }
+        const body = [];
+        response.setEncoding('utf8');
+        response.on('data', chunk => body.push(chunk));
+        response.on('end', () => {
+            if (response.statusCode >= 200 && response.statusCode < 300) {
+                logger.info('Success');
+                process.stdout.write(body.join(''));
+                process.exit(0);
+            } else {
+                logger.error('request failed with HTTP Status ', {
+                    statusCode: response.statusCode,
+                    body: body.join(''),
+                });
+                process.exit(1);
+            }
+        });
+    });
+    // generateV4Headers expects request object with path that does not
+    // include query
+    request.path = `/${bucketName}`;
+    auth.client.generateV4Headers(request, { search: query },
+        accessKey, secretKey, 's3');
+    request.path = `/${bucketName}?search=${escapedSearch}`;
+    if (verbose) {
+        logger.info('request headers', { headers: request._headers });
+    }
+    request.end();
+}
+
+/**
+ * This function is used as a binary to send a request to S3 to perform a
+ * search on the objects in a bucket
+ *
+ * @return {undefined}
+ */
+function searchBucket() {
+    // TODO: Include other bucket listing possible query params?
+    commander
+        .version('0.0.1')
+        .option('-a, --access-key <accessKey>', 'Access key id')
+        .option('-k, --secret-key <secretKey>', 'Secret access key')
+        .option('-b, --bucket <bucket>', 'Name of the bucket')
+        .option('-q, --query <query>', 'Search query')
+        .option('-h, --host <host>', 'Host of the server')
+        .option('-p, --port <port>', 'Port of the server')
+        .option('-s', '--ssl', 'Enable ssl')
+        .option('-v, --verbose')
+        .parse(process.argv);
+
+    const { host, port, accessKey, secretKey, bucket, query, verbose, ssl } =
+        commander;
+
+    if (!host || !port || !accessKey || !secretKey || !bucket || !query) {
+        logger.error('missing parameter');
+        commander.outputHelp();
+        process.exit(1);
+    }
+
+    _performSearch(host, port, bucket, query, accessKey, secretKey, verbose,
+        ssl);
+}
+
+searchBucket();
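For reference, the script wires those commander options straight into _performSearch. Assuming a local server on port 8000 and accessKey1/verySecretKey1 credentials (both illustrative, as is the file path, since file names are suppressed in this view), an invocation would look like:

    node bin/search_bucket.js -a accessKey1 -k verySecretKey1 \
        -b mybucket -q '`x-amz-meta-color`="blue"' -h 127.0.0.1 -p 8000 -v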
@@ -19,5 +19,16 @@
         "access": "accessKey2",
         "secret": "verySecretKey2"
     }]
-}]
+},
+{
+    "name": "Clueso",
+    "email": "inspector@clueso.info",
+    "arn": "arn:aws:iam::123456789014:root",
+    "canonicalID": "http://acs.zenko.io/accounts/service/clueso",
+    "shortid": "123456789014",
+    "keys": [{
+        "access": "cluesoKey1",
+        "secret": "cluesoSecretKey1"
+    }]
+}]
 }
config.json (20 changed lines)
@@ -1,5 +1,5 @@
 {
-    "port": 8000,
+    "port": 8001,
     "listenOn": [],
     "replicationGroupId": "RG001",
     "restEndpoints": {
@@ -8,7 +8,9 @@
         "cloudserver-front": "us-east-1",
         "s3.docker.test": "us-east-1",
         "127.0.0.2": "us-east-1",
-        "s3.amazonaws.com": "us-east-1"
+        "s3.amazonaws.com": "us-east-1",
+        "zenko-cloudserver-replicator": "us-east-1",
+        "lb": "us-east-1"
     },
     "websiteEndpoints": ["s3-website-us-east-1.amazonaws.com",
         "s3-website.us-east-2.amazonaws.com",
@@ -45,32 +47,32 @@
         "host": "localhost",
         "port": 8500
     },
-    "clusters": 10,
+    "clusters": 1,
     "log": {
        "logLevel": "info",
        "dumpLevel": "error"
    },
    "healthChecks": {
-        "allowFrom": ["127.0.0.1/8", "::1"]
+        "allowFrom": ["127.0.0.1/8", "::1", "0.0.0.0/0"]
    },
    "metadataClient": {
        "host": "localhost",
-        "port": 9990
+        "port": 9993
    },
    "dataClient": {
        "host": "localhost",
-        "port": 9991
+        "port": 9992
    },
    "metadataDaemon": {
        "bindAddress": "localhost",
-        "port": 9990
+        "port": 9993
    },
    "dataDaemon": {
        "bindAddress": "localhost",
-        "port": 9991
+        "port": 9992
    },
    "recordLog": {
-        "enabled": false,
+        "enabled": true,
        "recordLogName": "s3-recordlog"
    },
    "mongodb": {
@@ -115,7 +115,7 @@ const constants = {
     // for external backends, don't call unless at least 1 minute
     // (60,000 milliseconds) since last call
     externalBackendHealthCheckInterval: 60000,
-    versioningNotImplBackends: { azure: true },
+    versioningNotImplBackends: {},
     mpuMDStoredExternallyBackend: { aws_s3: true },
     /* eslint-enable camelcase */
     mpuMDStoredOnS3Backend: { azure: true },
@@ -6,13 +6,15 @@ const logger = require('./lib/utilities/logger');

 if (config.backends.data === 'file' ||
     (config.backends.data === 'multiple' &&
-     config.backends.metadata !== 'scality')) {
+     config.backends.metadata !== 'scality' &&
+     config.backends.metadata !== 'mongodb')) {
     const dataServer = new arsenal.network.rest.RESTServer(
         { bindAddress: config.dataDaemon.bindAddress,
           port: config.dataDaemon.port,
           dataStore: new arsenal.storage.data.file.DataFileStore(
              { dataPath: config.dataDaemon.dataPath,
-               log: config.log }),
+               log: config.log,
+               noSync: true }),
          log: config.log });
     dataServer.setup(err => {
         if (err) {
@@ -81,13 +81,8 @@ if [[ "$METADATA_HOST" ]]; then
     JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .metadataClient.host=\"$METADATA_HOST\""
 fi

-if [[ "$MONGODB_HOST" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.host=\"$MONGODB_HOST\""
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.port=27017"
-fi
-
-if [[ "$MONGODB_PORT" ]]; then
-    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.port=$MONGODB_PORT"
+if [[ "$MONGODB_HOSTS" ]]; then
+    JQ_FILTERS_CONFIG="$JQ_FILTERS_CONFIG | .mongodb.hosts=\"$MONGODB_HOSTS\""
 fi

 if [[ "$MONGODB_RS" ]]; then
@@ -112,6 +107,10 @@ if [[ $JQ_FILTERS_CONFIG != "." ]]; then
     mv config.json.tmp config.json
 fi

+if test -v INITIAL_INSTANCE_ID && test -v S3METADATAPATH && ! test -f ${S3METADATAPATH}/uuid ; then
+    echo -n ${INITIAL_INSTANCE_ID} > ${S3METADATAPATH}/uuid
+fi
+
 # s3 secret credentials for Zenko
 if [ -r /run/secrets/s3-credentials ] ; then
     . /run/secrets/s3-credentials
@@ -820,6 +820,7 @@ class Config extends EventEmitter {
             process.env.REPORT_TOKEN ||
             config.reportToken ||
             uuid.v4().toString();
+        this.reportEndpoint = process.env.REPORT_ENDPOINT;
     }

     _configureBackends() {
@@ -827,7 +828,7 @@ class Config extends EventEmitter {
      * Configure the backends for Authentication, Data and Metadata.
      */
     let auth = 'mem';
-    let data = 'file';
+    let data = 'multiple';
     let metadata = 'file';
     let kms = 'file';
     if (process.env.S3BACKEND) {
@@ -94,7 +94,9 @@ function isObjAuthorized(bucket, objectMD, requestType, canonicalID) {

     // User is already authorized on the bucket for FULL_CONTROL or WRITE or
     // bucket has canned ACL public-read-write
-    if (requestType === 'objectPut' || requestType === 'objectDelete') {
+    if (requestType === 'objectPut' || requestType === 'objectDelete' ||
+        requestType === 'ReplicateObject') {
+        // TODO remove ReplicateObject
         return true;
     }

@@ -0,0 +1,77 @@
+
+/*
+ This code is based on code from https://github.com/olehch/sqltomongo
+ with the following license:
+
+ The MIT License (MIT)
+
+ Copyright (c) 2016 Oleh
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
+*/
+
+/**
+ * A helper object to map SQL-like naming to MongoDB query syntax
+ */
+const exprMapper = {
+    '=': '$eq',
+    '<>': '$ne',
+    '>': '$gt',
+    '<': '$lt',
+    '>=': '$gte',
+    '<=': '$lte',
+    'LIKE': '$regex',
+};
+
+/*
+ * Parses object with WHERE clause recursively
+ * and generates MongoDB `find` query object
+ */
+function parseWhere(root) {
+    const operator = Object.keys(root)[0];
+
+    // extract leaf binary expressions
+    if (operator === 'AND') {
+        const e1 = parseWhere(root[operator][0]);
+        const e2 = parseWhere(root[operator][1]);
+
+        // eslint-disable-next-line
+        return { '$and' : [
+            e1,
+            e2,
+        ] };
+    } else if (operator === 'OR') {
+        const e1 = parseWhere(root[operator][0]);
+        const e2 = parseWhere(root[operator][1]);
+
+        // eslint-disable-next-line
+        return { '$or' : [
+            e1,
+            e2,
+        ] };
+    }
+    const field = root[operator][0];
+    const expr = exprMapper[operator];
+    const obj = {};
+    obj[`value.${field}`] = { [expr]: root[operator][1] };
+
+    return obj;
+}
+
+module.exports = parseWhere;
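To make the mapping concrete, here is a sketch of what parseWhere produces for a two-term clause; the { OPERATOR: [lhs, rhs] } AST shape is the one this parser and the validator both walk, and the field values are illustrative:

    // WHERE `x-amz-meta-color`="blue" AND `content-length`<>"0"
    const parseWhere = require('./parseWhere'); // path assumed for the sketch
    const ast = {
        AND: [
            { '=': ['x-amz-meta-color', 'blue'] },
            { '<>': ['content-length', '0'] },
        ],
    };
    console.log(parseWhere(ast));
    // => { $and: [
    //        { 'value.x-amz-meta-color': { $eq: 'blue' } },
    //        { 'value.content-length': { $ne: '0' } },
    //    ] }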
@@ -0,0 +1,64 @@
+const Parser = require('sql-where-parser');
+const parser = new Parser();
+const { errors } = require('arsenal');
+const objModel = require('arsenal').models.ObjectMD;
+
+function _validateTree(whereClause, possibleAttributes) {
+    let invalidAttribute;
+
+    function _searchTree(node) {
+        const operator = Object.keys(node)[0];
+
+        if (operator === 'AND') {
+            _searchTree(node[operator][0]);
+            _searchTree(node[operator][1]);
+        } else if (operator === 'OR') {
+            _searchTree(node[operator][0]);
+            _searchTree(node[operator][1]);
+        } else {
+            const field = node[operator][0];
+
+            if (!possibleAttributes[field] &&
+                !field.startsWith('x-amz-meta-')) {
+                invalidAttribute = field;
+            }
+        }
+    }
+    _searchTree(whereClause);
+    return invalidAttribute;
+}
+
+/**
+ * validateSearchParams - validate value of ?search= in request
+ * @param {string} searchParams - value of search params in request
+ * which should be just sql where clause
+ * For metadata: userMd.`x-amz-meta-color`=\"blue\"
+ * For tags: tags.`x-amz-meta-color`=\"blue\"
+ * For any other attribute: `content-length`=5
+ * @return {undefined | error} undefined if validates or arsenal error if not
+ */
+function validateSearchParams(searchParams) {
+    let ast;
+    try {
+        ast = parser.parse(searchParams);
+    } catch (e) {
+        if (e) {
+            return errors.InvalidArgument
+                .customizeDescription('Invalid sql where clause ' +
+                    'sent as search query');
+        }
+    }
+    const possibleAttributes = objModel.getAttributes();
+    const invalidAttribute = _validateTree(ast, possibleAttributes);
+    if (invalidAttribute) {
+        return {
+            error: errors.InvalidArgument
+                .customizeDescription('Search param ' +
+                    `contains unknown attribute: ${invalidAttribute}`) };
+    }
+    return {
+        ast,
+    };
+}
+
+module.exports = validateSearchParams;
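A short sketch of the two result shapes callers should expect from validateSearchParams (queries illustrative, require path assumed):

    const validateSearchParams = require('./validateSearch');
    validateSearchParams('`x-amz-meta-color`="blue"');
    // => { ast: { '=': ['x-amz-meta-color', 'blue'] } }
    validateSearchParams('madeUp="1"');
    // => { error: InvalidArgument, description:
    //      'Search param contains unknown attribute: madeUp' }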
@@ -1,17 +1,16 @@
 const querystring = require('querystring');
 const { errors, versioning, s3middleware } = require('arsenal');

 const constants = require('../../constants');
 const services = require('../services');
 const { metadataValidateBucket } = require('../metadata/metadataUtils');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const escapeForXml = s3middleware.escapeForXml;
 const { pushMetric } = require('../utapi/utilities');
+const validateSearchParams = require('../api/apiUtils/bucket/validateSearch');
+const parseWhere = require('../api/apiUtils/bucket/parseWhere');
 const versionIdUtils = versioning.VersionID;

-// Sample XML response for GET bucket objects:
-/*
+/* Sample XML response for GET bucket objects:
 <ListBucketResult xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
     <Name>example-bucket</Name>
     <Prefix></Prefix>
@@ -41,7 +40,6 @@ const versionIdUtils = versioning.VersionID;
 // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETVersion.html#RESTBucketGET_Examples
 /*
 <?xml version="1.0" encoding="UTF-8"?>
-
 <ListVersionsResult xmlns="http://s3.amazonaws.com/doc/2006-03-01">
     <Name>bucket</Name>
     <Prefix>my</Prefix>
@@ -203,6 +201,22 @@ function processMasterVersions(bucketName, listParams, list) {
     return xml.join('');
 }

+function handleResult(listParams, requestMaxKeys, encoding, authInfo,
+    bucketName, list, corsHeaders, log, callback) {
+    // eslint-disable-next-line no-param-reassign
+    listParams.maxKeys = requestMaxKeys;
+    // eslint-disable-next-line no-param-reassign
+    listParams.encoding = encoding;
+    let res;
+    if (listParams.listingType === 'DelimiterVersions') {
+        res = processVersions(bucketName, listParams, list);
+    } else {
+        res = processMasterVersions(bucketName, listParams, list);
+    }
+    pushMetric('listBucket', log, { authInfo, bucket: bucketName });
+    return callback(null, res, corsHeaders);
+}
+
 /**
  * bucketGet - Return list of objects in bucket
  * @param {AuthInfo} authInfo - Instance of AuthInfo class with
@@ -227,6 +241,14 @@ function bucketGet(authInfo, request, log, callback) {
     if (Number.isNaN(requestMaxKeys) || requestMaxKeys < 0) {
         return callback(errors.InvalidArgument);
     }
+    let validatedAst;
+    if (params.search !== undefined) {
+        const astOrError = validateSearchParams(params.search);
+        if (astOrError.error) {
+            return callback(astOrError.error);
+        }
+        validatedAst = astOrError.ast;
+    }
     // AWS only returns 1000 keys even if max keys are greater.
     // Max keys stated in response xml can be greater than actual
     // keys returned.
@@ -259,22 +281,18 @@ function bucketGet(authInfo, request, log, callback) {
         listParams.versionIdMarker = params['version-id-marker'] ?
             versionIdUtils.decode(params['version-id-marker']) : undefined;
     }
+    if (params.search !== undefined) {
+        log.info('performing search listing', { search: params.search });
+        listParams.mongifiedSearch = parseWhere(validatedAst);
+    }
     return services.getObjectListing(bucketName, listParams, log,
         (err, list) => {
             if (err) {
                 log.debug('error processing request', { error: err });
                 return callback(err, null, corsHeaders);
             }
-            listParams.maxKeys = requestMaxKeys;
-            listParams.encoding = encoding;
-            let res;
-            if (listParams.listingType === 'DelimiterVersions') {
-                res = processVersions(bucketName, listParams, list);
-            } else {
-                res = processMasterVersions(bucketName, listParams, list);
-            }
-            pushMetric('listBucket', log, { authInfo, bucket: bucketName });
-            return callback(null, res, corsHeaders);
+            return handleResult(listParams, requestMaxKeys, encoding,
+                authInfo, bucketName, list, corsHeaders, log, callback);
         });
     });
     return undefined;
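Taken together, the search path through bucketGet is small: validate the raw ?search= clause up front, keep the AST, and hand the mongified filter to the listing layer. A condensed sketch of the flow added above:

    const astOrError = validateSearchParams(params.search);
    if (astOrError.error) {
        return callback(astOrError.error);
    }
    // later, just before listing:
    listParams.mongifiedSearch = parseWhere(astOrError.ast);
    // services.getObjectListing() then carries listParams.mongifiedSearch
    // down to the metadata backend.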
@@ -117,7 +117,11 @@ function bucketPut(authInfo, request, log, callback) {
     }
     const { bucketName } = request;

-    if (request.bucketName === 'METADATA') {
+    if (request.bucketName === 'METADATA'
+        // Note: for this to work with Vault, would need way to set
+        // canonical ID to http://acs.zenko.io/accounts/service/clueso
+        && !authInfo.isRequesterThisServiceAccount('clueso')) {
         return callback(errors.AccessDenied
             .customizeDescription('The bucket METADATA is used ' +
                 'for internal purposes'));
@@ -85,6 +85,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
             request, false, streamingV4Params, log, next);
     },
 ], (err, storingResult) => {
+    log.info('objectPut waterfall done');
     if (err) {
         return callback(err, responseHeaders);
     }
@@ -118,6 +119,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
                 newByteLength,
                 oldByteLength: isVersionedObj ? null : oldByteLength,
             });
+            log.info('objectPut pushMetric done');
             return callback(null, responseHeaders);
         });
     });
@@ -1,17 +1,27 @@
+const serviceAccountPrefix =
+    require('arsenal').constants.zenkoServiceAccount;
+
 /** build simple authdata with only one account
  * @param {string} accessKey - account's accessKey
  * @param {string} secretKey - account's secretKey
+ * @param {string} [serviceName] - service name to use to generate can id
  * @return {object} authdata - authdata with account's accessKey and secretKey
  */
-function buildAuthDataAccount(accessKey, secretKey) {
+function buildAuthDataAccount(accessKey, secretKey, serviceName) {
+    // TODO: remove specific check for clueso and generate unique
+    // canonical id's for accounts
+    const canonicalID = serviceName && serviceName === 'clueso' ?
+        `${serviceAccountPrefix}/${serviceName}` : '12349df900b949e' +
+        '55d96a1e698fbacedfd6e09d98eacf8f8d52' +
+        '18e7cd47qwer';
+    const shortid = '123456789012';
     return {
         accounts: [{
             name: 'CustomAccount',
             email: 'customaccount1@setbyenv.com',
-            arn: 'arn:aws:iam::123456789012:root',
-            canonicalID: '12349df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d52' +
-                '18e7cd47qwer',
-            shortid: '123456789012',
+            arn: `arn:aws:iam::${shortid}:root`,
+            canonicalID,
+            shortid,
             keys: [{
                 access: accessKey,
                 secret: secretKey,
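For example, with the serviceName branch above, a hypothetical call mirroring the Clueso account from conf/authdata.json would be:

    const authData = buildAuthDataAccount('cluesoKey1', 'cluesoSecretKey1',
        'clueso');
    // authData.accounts[0].canonicalID === `${serviceAccountPrefix}/clueso`,
    // i.e. the http://acs.zenko.io/accounts/service/clueso ID seen earlier;
    // without a serviceName the old hard-coded canonical ID is kept.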
@@ -149,14 +149,30 @@ class AzureClient {
                         .customizeDescription('Error returned from ' +
                         `Azure: ${err.message}`));
                 }
-                return callback(null, azureKey);
+                // XXX RACE CONDITION?
+                return this._errorWrapper('put', 'createBlobSnapshot',
+                    [this._azureContainerName, azureKey, options,
+                    (err, snapshotName) => {
+                        if (err) {
+                            logHelper(log, 'error', 'err from Azure PUT ' +
+                                'data backend', err, this._dataStoreName);
+                            return callback(errors.ServiceUnavailable
+                                .customizeDescription('Error returned ' +
+                                    `from Azure: ${err.message}`));
+                        }
+                        process.stdout.write('GOT AZURE VERSION ID' +
+                            `${snapshotName}\n`);
+                        return callback(null, azureKey, snapshotName);
+                    }], log, callback);
             }], log, callback);
         });
     }

     head(objectGetInfo, reqUids, callback) {
         const log = createLogger(reqUids);
-        const { key, azureStreamingOptions } = objectGetInfo;
+        const { key, azureStreamingOptions, dataStoreVersionId } =
+            objectGetInfo;
+        azureStreamingOptions.snapshotId = dataStoreVersionId;
         return this._errorWrapper('head', 'getBlobProperties',
             [this._azureContainerName, key, azureStreamingOptions,
             err => {
@@ -182,7 +198,8 @@ class AzureClient {
     get(objectGetInfo, range, reqUids, callback) {
         const log = createLogger(reqUids);
         // for backwards compatibility
-        const { key, response, azureStreamingOptions } = objectGetInfo;
+        const { key, response, azureStreamingOptions,
+            dataStoreVersionId } = objectGetInfo;
         let streamingOptions;
         if (azureStreamingOptions) {
             // option coming from api.get()
@@ -193,6 +210,8 @@ class AzureClient {
             const rangeEnd = range[1] ? range[1].toString() : undefined;
             streamingOptions = { rangeStart, rangeEnd };
         }
+        process.stdout.write('AZURE VERSION GET', dataStoreVersionId);
+        streamingOptions.snapshotId = dataStoreVersionId;
         this._errorWrapper('get', 'getBlobToStream',
             [this._azureContainerName, key, response, streamingOptions,
             err => {
@@ -6,6 +6,7 @@ const async = require('async');

 const { config } = require('../Config');
 const parseLC = require('./locationConstraintParser');
+const DataFileBackend = require('./file/backend');

 const { checkExternalBackend } = require('./external/utils');
 const { externalBackendHealthCheckInterval } = require('../../constants');
@@ -291,6 +292,10 @@ const multipleBackendGateway = {
         }
         return cb();
     },
+
+    getDiskUsage: (reqUids, callback) => {
+        new DataFileBackend().getDiskUsage(reqUids, callback);
+    },
 };

 module.exports = multipleBackendGateway;
@@ -0,0 +1,460 @@
+const arsenal = require('arsenal');
+const async = require('async');
+const forge = require('node-forge');
+const request = require('request');
+
+const { buildAuthDataAccount } = require('./auth/in_memory/builder');
+const metadata = require('./metadata/wrapper');
+const _config = require('./Config').config;
+const logger = require('./utilities/logger');
+
+const managementEndpointRoot =
+    process.env.MANAGEMENT_ENDPOINT ||
+    'https://api.zenko.io';
+
+const managementEndpoint = `${managementEndpointRoot}/api/v1/instance`;
+
+const managementDatabaseName = 'PENSIEVE';
+const initRemoteManagementRetryDelay = 10000;
+const pushReportDelay = 30000;
+const pullConfigurationOverlayDelay = 10000;
+
+const tokenRotationDelay = 3600 * 24 * 7 * 1000; // 7 days
+const tokenConfigurationKey = 'auth/zenko/remote-management-token';
+const latestOverlayVersionKey = 'configuration/overlay-version';
+const replicatorEndpoint = 'zenko-cloudserver-replicator';
+
+function decryptSecret(instanceCredentials, secret) {
+    // XXX don't forget to use u.encryptionKeyVersion if present
+    const privateKey = forge.pki.privateKeyFromPem(
+        instanceCredentials.privateKey);
+    const encryptedSecretKey = forge.util.decode64(secret);
+    return privateKey.decrypt(encryptedSecretKey, 'RSA-OAEP', {
+        md: forge.md.sha256.create(),
+    });
+}
+
+function getStoredCredentials(instanceId, log, callback) {
+    metadata.getObjectMD(managementDatabaseName, tokenConfigurationKey, {},
+        log, callback);
+}
+
+function patchConfiguration(instanceId, newConf, log, cb) {
+    if (newConf.version === undefined) {
+        log.debug('no remote configuration created yet');
+        return process.nextTick(cb, null, newConf);
+    }
+
+    if (_config.overlayVersion !== undefined &&
+        newConf.version <= _config.overlayVersion) {
+        log.debug('configuration version already applied',
+            { configurationVersion: newConf.version });
+        return process.nextTick(cb, null, newConf);
+    }
+
+    return getStoredCredentials(instanceId, log, (err, creds) => {
+        if (err) {
+            return cb(err);
+        }
+
+        const accounts = [];
+        if (newConf.users) {
+            newConf.users.forEach(u => {
+                if (u.secretKey && u.secretKey.length > 0) {
+                    const secretKey = decryptSecret(creds, u.secretKey);
+                    // accountType will be service-replication or service-clueso
+                    let serviceName;
+                    if (u.accountType && u.accountType.startsWith('service-')) {
+                        serviceName = u.accountType.split('-')[1];
+                    }
+                    const newAccount = buildAuthDataAccount(
+                        u.accessKey, secretKey, serviceName);
+                    accounts.push(newAccount.accounts[0]);
+                }
+            });
+        }
+
+        const restEndpoints = Object.assign({}, _config.restEndpoints);
+        if (newConf.endpoints) {
+            newConf.endpoints.forEach(e => {
+                restEndpoints[e.hostname] = e.locationName;
+            });
+        }
+
+        if (!restEndpoints[replicatorEndpoint]) {
+            restEndpoints[replicatorEndpoint] = 'us-east-1';
+        }
+
+        const locations = {};
+        if (newConf.locations) {
+            // Object.values() is apparently too recent
+            Object.keys(newConf.locations || {}).forEach(k => {
+                const l = newConf.locations[k];
+                const location = {};
+                switch (l.locationType) {
+                case 'location-mem-v1':
+                    location.type = 'mem';
+                    break;
+                case 'location-file-v1':
+                    location.type = 'file';
+                    break;
+                case 'location-azure-v1':
+                    location.type = 'azure';
+                    if (l.details.secretKey && l.details.secretKey.length > 0) {
+                        location.details = {
+                            bucketMatch: l.details.bucketMatch,
+                            azureStorageEndpoint: l.details.endpoint,
+                            azureStorageAccountName: l.details.accessKey,
+                            azureStorageAccessKey: decryptSecret(creds,
+                                l.details.secretKey),
+                            azureContainerName: l.details.bucketName,
+                        };
+                    }
+                    break;
+                case 'location-do-spaces-v1':
+                case 'location-aws-s3-v1':
+                case 'location-wasabi-v1':
+                    location.type = 'aws_s3';
+                    if (l.details.secretKey && l.details.secretKey.length > 0) {
+                        location.details = {
+                            credentials: {
+                                accessKey: l.details.accessKey,
+                                secretKey: decryptSecret(creds,
+                                    l.details.secretKey),
+                            },
+                            bucketName: l.details.bucketName,
+                            bucketMatch: l.details.bucketMatch,
+                            serverSideEncryption:
+                                Boolean(l.details.serverSideEncryption),
+                            awsEndpoint: l.details.endpoint ||
+                                's3.amazonaws.com',
+                        };
+                    }
+                    break;
+                case 'location-gcp-v1':
+                    location.type = 'gcp';
+                    if (l.details.secretKey && l.details.secretKey.length > 0) {
+                        location.details = {
+                            credentials: {
+                                accessKey: l.details.accessKey,
+                                secretKey: decryptSecret(creds,
+                                    l.details.secretKey),
+                            },
+                            bucketName: l.details.bucketName,
+                            mpuBucketName: l.details.mpuBucketName,
+                            bucketMatch: l.details.bucketMatch,
+                            gcpEndpoint: l.details.endpoint ||
+                                'storage.googleapis.com',
+                        };
+                    }
+                    break;
+                default:
+                    log.info('unknown location type', { locationType:
+                        l.locationType });
+                    return;
+                }
+                location.legacyAwsBehavior = Boolean(l.legacyAwsBehavior);
+                locations[l.name] = location;
+            });
+            try {
+                _config.setLocationConstraints(locations);
+            } catch (error) {
+                log.info('could not apply configuration version location ' +
+                    'constraints', { error });
+                return cb(error);
+            }
+        }
+
+        _config.setAuthDataAccounts(accounts);
+        _config.setRestEndpoints(restEndpoints);
+        _config.overlayVersion = newConf.version;
+
+        log.info('applied configuration version',
+            { configurationVersion: _config.overlayVersion });
+
+        return cb(null, newConf);
+    });
+}
+
+function loadRemoteOverlay(instanceId, remoteToken, cachedOverlay, log, cb) {
+    log.debug('loading remote overlay');
+    const opts = {
+        headers: {
+            'x-instance-authentication-token': remoteToken,
+            'x-scal-request-id': log.getSerializedUids(),
+        },
+    };
+    request(`${managementEndpoint}/${instanceId}/config/overlay`, opts,
+        (error, response, body) => {
+            if (error) {
+                return cb(error);
+            }
+            if (response.statusCode === 200) {
+                return cb(null, cachedOverlay, body);
+            }
+            if (response.statusCode === 404) {
+                return cb(null, cachedOverlay, {});
+            }
+            return cb(arsenal.errors.AccessForbidden, cachedOverlay, {});
+        }).json();
+}
+
+function loadCachedOverlay(log, callback) {
+    return metadata.getObjectMD(managementDatabaseName,
+        latestOverlayVersionKey, {}, log, (err, version) => {
+            if (err) {
+                if (err.NoSuchKey) {
+                    return process.nextTick(callback, null, {});
+                }
+                return callback(err);
+            }
+            return metadata.getObjectMD(managementDatabaseName,
+                `configuration/overlay/${version}`, {}, log, (err, conf) => {
+                    if (err) {
+                        if (err.NoSuchKey) {
+                            return process.nextTick(callback, null, {});
+                        }
+                        return callback(err);
+                    }
+                    return callback(null, conf);
+                });
+        });
+}
+
+function overlayHasVersion(overlay) {
+    return overlay && overlay.version !== undefined;
+}
+
+function remoteOverlayIsNewer(cachedOverlay, remoteOverlay) {
+    return (overlayHasVersion(remoteOverlay) &&
+            (!overlayHasVersion(cachedOverlay) ||
+             remoteOverlay.version > cachedOverlay.version));
+}
+
+function saveConfigurationVersion(cachedOverlay, remoteOverlay, log, cb) {
+    if (remoteOverlayIsNewer(cachedOverlay, remoteOverlay)) {
+        const objName = `configuration/overlay/${remoteOverlay.version}`;
+        metadata.putObjectMD(managementDatabaseName, objName, remoteOverlay,
+            {}, log, error => {
+                if (error) {
+                    log.error('could not save configuration version',
+                        { configurationVersion: remoteOverlay.version });
+                }
+                metadata.putObjectMD(managementDatabaseName,
+                    latestOverlayVersionKey, remoteOverlay.version, {}, log,
+                    error => cb(error, remoteOverlay));
+            });
+    } else {
+        log.debug('no remote configuration to cache yet');
+        process.nextTick(cb, null, remoteOverlay);
+    }
+}
+
+// TODO save only after successful patch
+function applyConfigurationOverlay(instanceId, remoteToken, log, cb) {
+    async.waterfall([
+        wcb => loadCachedOverlay(log, wcb),
+        (cachedOverlay, wcb) => patchConfiguration(instanceId, cachedOverlay,
+            log, wcb),
+        (cachedOverlay, wcb) =>
+            loadRemoteOverlay(instanceId, remoteToken, cachedOverlay, log, wcb),
+        (cachedOverlay, remoteOverlay, wcb) =>
+            saveConfigurationVersion(cachedOverlay, remoteOverlay, log, wcb),
+        (remoteOverlay, wcb) => patchConfiguration(instanceId, remoteOverlay,
+            log, wcb),
+    ], error => {
+        if (error) {
+            log.error('could not apply managed configuration', { error });
+        }
+        if (cb) {
+            cb(null, instanceId, remoteToken);
+        }
+        setTimeout(applyConfigurationOverlay, pullConfigurationOverlayDelay,
+            instanceId, remoteToken, logger.newRequestLogger());
+    });
+}
+
+function getStats() {
+    const fromURL = `http://localhost:${_config.port}/_/report`;
+    const fromOptions = {
+        headers: {
+            'x-scal-report-token': process.env.REPORT_TOKEN,
+        },
+    };
+    return request(fromURL, fromOptions).json();
+}
+
+function postStats(instanceId, remoteToken, next) {
+    const toURL = `${managementEndpoint}/${instanceId}/stats`;
+    const toOptions = {
+        headers: {
+            'x-instance-authentication-token': remoteToken,
+        },
+    };
+    const toCallback = (err, response, body) => {
+        if (err) {
+            process.stdout.write(`STAT PUSH ERR ${err}\n`);
+        }
+        if (response && response.statusCode !== 201) {
+            process.stdout.write(`STAT PUSH ERR ${response.statusCode} ` +
+                `${body}\n`);
+        }
+        if (next) {
+            next(null, instanceId, remoteToken);
+        }
+    };
+    return request.post(toURL, toOptions, toCallback).json();
+}
+
+function pushStats(instanceId, remoteToken, next) {
+    getStats().pipe(postStats(instanceId, remoteToken, next));
+    setTimeout(pushStats, pushReportDelay, instanceId, remoteToken);
+}
+
+function issueCredentials(instanceId, log, callback) {
+    log.info('registering with API to get token');
+
+    const keyPair = forge.pki.rsa.generateKeyPair({ bits: 2048, e: 0x10001 });
+    const privateKey = forge.pki.privateKeyToPem(keyPair.privateKey);
+    const publicKey = forge.pki.publicKeyToPem(keyPair.publicKey);
+
+    const postData = {
+        publicKey,
+    };
+
+    request.post(`${managementEndpoint}/${instanceId}/register`,
+        (error, response, body) => {
+            if (error) {
+                return callback(error);
+            }
+            if (response.statusCode !== 201) {
+                log.error('could not register instance', {
+                    statusCode: response.statusCode,
+                });
+                return callback(arsenal.errors.InternalError);
+            }
+            /* eslint-disable no-param-reassign */
+            body.privateKey = privateKey;
+            /* eslint-enable no-param-reassign */
+            return callback(null, body);
+        }).json(postData);
+}
+
+function confirmInstanceCredentials(instanceId, creds, log, callback) {
+    const opts = {
+        headers: {
+            'x-instance-authentication-token': creds.token,
+        },
+    };
+    const postData = {
+        serial: creds.serial || 0,
+        publicKey: creds.publicKey,
+    };
+    request.post(`${managementEndpoint}/${instanceId}/confirm`,
+        opts, (error, response) => {
+            if (error) {
+                return callback(error);
+            }
+            if (response.statusCode === 200) {
+                return callback(null, instanceId, creds.token);
+            }
+            return callback(arsenal.errors.InternalError);
+        }).json(postData);
+}
+
+function initManagementCredentials(instanceId, log, callback) {
+    getStoredCredentials(instanceId, log, (error, value) => {
+        if (error) {
+            if (error.NoSuchKey) {
+                return issueCredentials(instanceId, log, (error, value) => {
+                    if (error) {
+                        log.error('could not issue token', { error });
+                        return callback(error);
+                    }
+                    log.debug('saving token');
+                    return metadata.putObjectMD(managementDatabaseName,
+                        tokenConfigurationKey, value, {}, log, error => {
+                            if (error) {
+                                log.error('could not save token',
+                                    { error });
+                                return callback(error);
+                            }
+                            log.info('saved token locally, ' +
+                                'confirming instance');
+                            return confirmInstanceCredentials(
+                                instanceId, value, log, callback);
+                        });
+                });
+            }
+            log.debug('could not get token', { error });
+            return callback(error);
+        }
+
+        log.info('returning existing token');
+        if (Date.now() - value.issueDate > tokenRotationDelay) {
+            log.warn('management API token is too old, should re-issue');
+        }
+
+        return callback(null, instanceId, value.token);
+    });
+}
+
+function initManagementDatabase(log, callback) {
+    // XXX choose proper owner names
+    const md = new arsenal.models.BucketInfo(managementDatabaseName, 'owner',
+        'owner display name', new Date().toJSON());
+
+    metadata.createBucket(managementDatabaseName, md, log, error => {
+        if (error) {
+            if (error.BucketAlreadyExists) {
+                log.info('created management database');
+                return callback();
+            }
+            log.error('could not initialize management database',
+                { error });
+            return callback(error);
+        }
+        log.info('initialized management database');
+        return callback();
+    });
+}
+
+function initManagement(log) {
+    if ((process.env.REMOTE_MANAGEMENT_DISABLE &&
+        process.env.REMOTE_MANAGEMENT_DISABLE !== '0')
+        || process.env.S3BACKEND === 'mem') {
+        log.info('remote management disabled');
+        return;
+    }
+    async.waterfall([
+        cb => initManagementDatabase(log, cb),
+        cb => metadata.getUUID(log, cb),
+        (instanceId, cb) => initManagementCredentials(instanceId, log, cb),
+        (instanceId, token, cb) =>
+            applyConfigurationOverlay(instanceId, token, log, cb),
+        (instanceId, token, cb) => pushStats(instanceId, token, cb),
+        (instanceId, token, cb) => {
+            metadata.notifyBucketChange(() => {
+                pushStats(instanceId, token);
+            });
+            process.nextTick(cb);
+        },
+    ], error => {
+        if (error) {
+            log.error('could not initialize remote management, retrying later',
+                { error });
+            setTimeout(initManagement,
+                initRemoteManagementRetryDelay,
+                logger.newRequestLogger());
+        } else {
+            metadata.getUUID(log, (err, instanceId) => {
+                log.info(`this deployment's Instance ID is ${instanceId}`);
+                log.end('management init done');
+            });
+        }
+    });
+}
+
+module.exports = {
+    initManagement,
+};
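The overlay plumbing above applies each configuration version at most once; remoteOverlayIsNewer is the gate. A few illustrative evaluations of that predicate:

    remoteOverlayIsNewer({ version: 3 }, { version: 4 }); // true: remote is ahead
    remoteOverlayIsNewer({ version: 4 }, { version: 4 }); // false: already applied
    remoteOverlayIsNewer({}, { version: 1 });             // true: nothing cached yet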
@@ -1,5 +1,6 @@
 const cluster = require('cluster');
 const arsenal = require('arsenal');
+const async = require('async');

 const logger = require('../../utilities/logger');
 const BucketInfo = arsenal.models.BucketInfo;
@@ -12,6 +13,8 @@ const versionSep = arsenal.versioning.VersioningConstants.VersionId.Separator;

 const METASTORE = '__metastore';

+const itemScanRefreshDelay = 1000 * 30 * 60; // 30 minutes
+
 class BucketFileInterface {

     /**
@@ -37,6 +40,9 @@ class BucketFileInterface {
                 this.setupMetadataServer();
             }
         });
+
+        this.lastItemScanTime = null;
+        this.lastItemScanResult = null;
     }

     setupMetadataServer() {
@@ -89,6 +95,7 @@ class BucketFileInterface {
             if (err === undefined) {
                 return cb(errors.BucketAlreadyExists);
             }
+            this.lastItemScanTime = null;
             this.putBucketAttributes(bucketName,
                 bucketMD,
                 log, cb);
@@ -180,6 +187,7 @@ class BucketFileInterface {
                     logObj);
                 return cb(errors.InternalError);
             }
+            this.lastItemScanTime = null;
            return cb();
        });
        return undefined;
@@ -324,6 +332,11 @@ class BucketFileInterface {
     }

     countItems(log, cb) {
+        if (this.lastItemScanTime !== null &&
+            (Date.now() - this.lastItemScanTime) <= itemScanRefreshDelay) {
+            return process.nextTick(cb, null, this.lastItemScanResult);
+        }
+
         const params = {};
         const extension = new arsenal.algorithms.list.Basic(params, log);
         const requestParams = extension.genMDParams();
@@ -332,6 +345,7 @@ class BucketFileInterface {
             objects: 0,
             versions: 0,
             buckets: 0,
+            bucketList: [],
         };
         let cbDone = false;

@@ -344,9 +358,12 @@ class BucketFileInterface {
             if (!e.includes(METASTORE)) {
                 if (e.includes(constants.usersBucket)) {
                     res.buckets++;
+                    res.bucketList.push({
+                        name: e.split(constants.splitter)[1],
+                    });
                 } else if (e.includes(versionSep)) {
                     res.versions++;
-                } else {
+                } else if (!e.includes('..recordLogs#s3-recordlog')) {
                     res.objects++;
                 }
             }
@@ -366,7 +383,25 @@ class BucketFileInterface {
             .on('end', () => {
                 if (!cbDone) {
                     cbDone = true;
-                    return cb(null, res);
+                    async.eachSeries(res.bucketList, (bucket, cb) => {
+                        this.getBucketAttributes(bucket.name, log,
+                            (err, bucketInfo) => {
+                                if (err) {
+                                    return cb(err);
+                                }
+                                /* eslint-disable no-param-reassign */
+                                bucket.location =
+                                    bucketInfo.getLocationConstraint();
+                                /* eslint-enable no-param-reassign */
+                                return cb();
+                            });
+                    }, err => {
+                        if (!err) {
+                            this.lastItemScanTime = Date.now();
+                            this.lastItemScanResult = res;
+                        }
+                        return cb(err, res);
+                    });
                 }
                 return undefined;
             });
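With this change countItems serves a cached result for up to 30 minutes (itemScanRefreshDelay) and enriches each bucket with its location constraint. The result handed to cb now looks roughly like this (values illustrative):

    {
        objects: 1200,
        versions: 4,
        buckets: 2,
        bucketList: [
            { name: 'mybucket', location: 'us-east-1' },
            { name: 'other-bucket', location: 'azure' },
        ],
    }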
@@ -235,6 +235,7 @@ function putData(request, response, bucketInfo, objMd, log, callback) {
         owner: canonicalID,
         namespace: NAMESPACE,
         objectKey: request.objectKey,
+        metaHeaders: {},
     };
     const payloadLen = parseInt(request.headers['content-length'], 10);
     const backendInfoObj = locationConstraintCheck(
@@ -11,6 +11,7 @@ const _config = require('./Config').config;
 const { blacklistedPrefixes } = require('../constants');
 const api = require('./api/api');
 const data = require('./data/wrapper');
+const { initManagement } = require('./management');

 const routes = arsenal.s3routes.routes;
 const websiteEndpoints = _config.websiteEndpoints;
@@ -36,6 +37,7 @@ const STATS_INTERVAL = 5; // 5 seconds
 const STATS_EXPIRY = 30; // 30 seconds
 const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL,
     STATS_EXPIRY);
+const enableRemoteManagement = true;

 class S3Server {
     /**
@@ -74,6 +76,11 @@ class S3Server {
             // eslint-disable-next-line no-param-reassign
             res.isclosed = true;
         });
+        // use proxied hostname if needed
+        if (req.headers['x-target-host']) {
+            // eslint-disable-next-line no-param-reassign
+            req.headers.host = req.headers['x-target-host'];
+        }
         const params = {
             api,
             internalHandlers,
@@ -136,6 +143,14 @@ class S3Server {
         } else {
             this.server.listen(port);
         }
+
+        // TODO this should wait for metadata healthcheck to be ok
+        // TODO only do this in cluster master
+        setTimeout(() => {
+            if (enableRemoteManagement) {
+                initManagement(logger.newRequestLogger());
+            }
+        }, 5000);
     }

     /*
@@ -11,10 +11,9 @@ const metadata = require('../metadata/wrapper');
 const REPORT_MODEL_VERSION = 1;

 function cleanup(obj) {
-    const ret = JSON.parse(JSON.stringify(obj));
-    delete ret.authData;
-    delete ret.reportToken;
-    return ret;
+    return {
+        overlayVersion: obj.overlayVersion,
+    };
 }

 function isAuthorized(clientIP, req) {
@@ -22,26 +21,6 @@ function isAuthorized(clientIP, req) {
         req.headers['x-scal-report-token'] === config.reportToken;
 }

-const itemScanRefreshDelay = 1000 * 30 * 60; // 30 minutes
-let lastItemScanTime;
-let lastItemScanResult;
-
-function countItems(log, cb) {
-    // The value is cached, as scanning all the dataset can be I/O intensive
-    if (lastItemScanTime === undefined ||
-        (Date.now() - lastItemScanTime) > itemScanRefreshDelay) {
-        return metadata.countItems(log, (err, res) => {
-            if (err) {
-                return cb(err);
-            }
-            lastItemScanTime = Date.now();
-            lastItemScanResult = res;
-            return cb(null, res);
-        });
-    }
-    return process.nextTick(cb, null, lastItemScanResult);
-}
-
 function getGitVersion(cb) {
     fs.readFile('.git/HEAD', 'ascii', (err, val) => {
         if (err && err.code === 'ENOENT') {
@@ -117,7 +96,7 @@ function reportHandler(clientIP, req, res, log) {
         getMDDiskUsage: cb => metadata.getDiskUsage(log, cb),
         getDataDiskUsage: cb => data.getDiskUsage(log, cb),
         getVersion: cb => getGitVersion(cb),
-        getObjectCount: cb => countItems(log, cb),
+        getObjectCount: cb => metadata.countItems(log, cb),
     },
     (err, results) => {
         if (err) {
@@ -141,7 +120,7 @@ function reportHandler(clientIP, req, res, log) {

         res.writeHead(200, { 'Content-Type': 'application/json' });
         res.write(JSON.stringify(response));
-        log.end('report handler finished');
+        log.end().debug('report handler finished');
     }
     res.end();
 });
File diff suppressed because it is too large
@@ -1,6 +1,6 @@
 {
     "name": "s3",
-    "version": "7.4.0",
+    "version": "7.4.0-orbit-test",
     "description": "S3 connector",
     "main": "index.js",
     "engines": {
@@ -20,15 +20,18 @@
     "homepage": "https://github.com/scality/S3#readme",
     "dependencies": {
         "aws-sdk": "2.28.0",
-        "arsenal": "scality/Arsenal",
+        "arsenal": "scality/Arsenal#forward/orbit",
         "async": "~2.5.0",
         "google-auto-auth": "^0.9.1",
         "azure-storage": "^2.1.0",
         "bucketclient": "scality/bucketclient",
         "commander": "^2.9.0",
+        "node-forge": "^0.7.1",
         "node-uuid": "^1.4.3",
         "npm-run-all": "~4.0.2",
+        "request": "^2.81.0",
         "sproxydclient": "scality/sproxydclient",
+        "sql-where-parser": "~2.2.1",
         "utapi": "scality/utapi",
         "utf8": "~2.1.1",
         "uuid": "^3.0.1",
@@ -85,6 +85,15 @@ describe('Report route', () => {
         });
     });

+    it('should remove unwanted sections from config', done => {
+        queryReport(done, response => {
+            if (response.config && response.config.mongodb) {
+                return done(new Error('config contains unwanted sections'));
+            }
+            return done();
+        });
+    });
+
     it('should remove report token from config', done => {
         queryReport(done, response => {
             if (response.config && response.config.reportToken) {
@@ -29,8 +29,8 @@ const expectedLifecycleConfig = {
         },
         actions: [
             {
-                actionName: 'Expiration',
-                days: 365,
+                actionName: 'AbortIncompleteMultipartUpload',
+                days: 30,
             },
         ],
    },
@@ -44,6 +44,10 @@ const expectedLifecycleConfig = {
                 key: 'test-key1',
                 val: 'test-value1',
             },
+            {
+                key: 'test-key2',
+                val: 'test-value2',
+            },
         ],
     },
     actions: [
@@ -53,6 +57,25 @@ const expectedLifecycleConfig = {
             },
         ],
     },
+    {
+        ruleID: 'test-id3',
+        ruleStatus: 'Disabled',
+        filter: {
+            rulePrefix: '',
+            tags: [
+                {
+                    key: 'test-key1',
+                    val: 'test-value1',
+                },
+            ],
+        },
+        actions: [
+            {
+                actionName: 'Expiration',
+                days: 365,
+            },
+        ],
+    },
 ],
};

@@ -14,24 +14,32 @@ function getLifecycleRequest(bucketName, xml) {
 function getLifecycleXml() {
     const id1 = 'test-id1';
     const id2 = 'test-id2';
+    const id3 = 'test-id3';
     const prefix = 'test-prefix';
     const tags = [
         {
             key: 'test-key1',
             value: 'test-value1',
         },
+        {
+            key: 'test-key2',
+            value: 'test-value2',
+        },
     ];
     const action1 = 'Expiration';
     const days1 = 365;
     const action2 = 'NoncurrentVersionExpiration';
     const days2 = 1;
+    const action3 = 'AbortIncompleteMultipartUpload';
+    const days3 = 30;
     return '<LifecycleConfiguration ' +
         'xmlns="http://s3.amazonaws.com/doc/2006-03-01/">' +
         '<Rule>' +
         `<ID>${id1}</ID>` +
         '<Status>Enabled</Status>' +
         `<Prefix>${prefix}</Prefix>` +
-        `<${action1}><Days>${days1}</Days></${action1}>` +
+        `<${action3}><DaysAfterInitiation>${days3}` +
+        `</DaysAfterInitiation></${action3}>` +
         '</Rule>' +
         '<Rule>' +
         `<ID>${id2}</ID>` +
@@ -39,10 +47,19 @@ function getLifecycleXml() {
         '<Filter><And>' +
         `<Prefix>${prefix}</Prefix>` +
         `<Tag><Key>${tags[0].key}</Key>` +
-        `<Value>${tags[0].value}</Value></Tag>` +
+        `<Value>${tags[0].value}</Value>` +
+        `<Key>${tags[1].key}</Key>` +
+        `<Value>${tags[1].value}</Value></Tag>` +
         '</And></Filter>' +
         `<${action2}><NoncurrentDays>${days2}</NoncurrentDays></${action2}>` +
         '</Rule>' +
+        '<Rule>' +
+        `<ID>${id3}</ID>` +
+        '<Status>Disabled</Status>' +
+        `<Filter><Tag><Key>${tags[0].key}</Key>` +
+        `<Value>${tags[0].value}</Value></Tag></Filter>` +
+        `<${action1}><Days>${days1}</Days></${action1}>` +
+        '</Rule>' +
         '</LifecycleConfiguration>';
 }

@@ -0,0 +1,122 @@
+const assert = require('assert');
+const { errors } = require('arsenal');
+const validateSearch =
+    require('../../../lib/api/apiUtils/bucket/validateSearch');
+
+
+describe('validate search where clause', () => {
+    const tests = [
+        {
+            it: 'should allow a valid simple search with table attribute',
+            searchParams: '`x-amz-meta-dog`="labrador"',
+            result: undefined,
+        },
+        {
+            it: 'should allow a simple search with known ' +
+                'column attribute',
+            searchParams: '`content-length`="10"',
+            result: undefined,
+        },
+        {
+            it: 'should allow valid search with AND',
+            searchParams: '`x-amz-meta-dog`="labrador" ' +
+                'AND `x-amz-meta-age`="5"',
+            result: undefined,
+        },
+        {
+            it: 'should allow valid search with OR',
+            searchParams: '`x-amz-meta-dog`="labrador" ' +
+                'OR `x-amz-meta-age`="5"',
+            result: undefined,
+        },
+        {
+            it: 'should allow valid search with double AND',
+            searchParams: '`x-amz-meta-dog`="labrador" ' +
+                'AND `x-amz-meta-age`="5" ' +
+                'AND `x-amz-meta-whatever`="ok"',
+            result: undefined,
+        },
+        {
+            it: 'should allow valid chained search with tables and columns',
+            searchParams: '`x-amz-meta-dog`="labrador" ' +
+                'AND `x-amz-meta-age`="5" ' +
+                'AND `content-length`="10"' +
+                'OR isDeleteMarker="true"' +
+                'AND `x-amz-meta-whatever`="ok"',
+            result: undefined,
+        },
+        {
+            it: 'should allow valid LIKE search',
+            searchParams: '`x-amz-meta-dog` LIKE "lab%" ' +
+                'AND `x-amz-meta-age` LIKE "5%" ' +
+                'AND `content-length`="10"',
+            result: undefined,
+        },
+        {
+            it: 'should disallow a LIKE search with invalid attribute',
+            searchParams: '`x-zma-meta-dog` LIKE "labrador"',
+            result: errors.InvalidArgument.customizeDescription('Search ' +
+                'param contains unknown attribute: x-zma-meta-dog'),
+        },
+        {
+            it: 'should disallow a simple search with unknown attribute',
+            searchParams: '`x-zma-meta-dog`="labrador"',
+            result: errors.InvalidArgument.customizeDescription('Search ' +
+                'param contains unknown attribute: x-zma-meta-dog'),
+        },
+        {
+            it: 'should disallow a compound search with unknown ' +
+                'attribute on right',
+            searchParams: '`x-amz-meta-dog`="labrador" AND ' +
+                '`x-zma-meta-dog`="labrador"',
+            result: errors.InvalidArgument.customizeDescription('Search ' +
+                'param contains unknown attribute: x-zma-meta-dog'),
+        },
+        {
+            it: 'should disallow a compound search with unknown ' +
+                'attribute on left',
+            searchParams: '`x-zma-meta-dog`="labrador" AND ' +
+                '`x-amz-meta-dog`="labrador"',
+            result: errors.InvalidArgument.customizeDescription('Search ' +
+                'param contains unknown attribute: x-zma-meta-dog'),
+        },
+        {
+            it: 'should disallow a chained search with one invalid ' +
+                'table attribute',
+            searchParams: '`x-amz-meta-dog`="labrador" ' +
+                'AND `x-amz-meta-age`="5" ' +
+                'OR `x-zma-meta-whatever`="ok"',
+            result: errors.InvalidArgument.customizeDescription('Search ' +
+                'param contains unknown attribute: x-zma-meta-whatever'),
+        },
+        {
+            it: 'should disallow a simple search with unknown ' +
+                'column attribute',
+            searchParams: 'whatever="labrador"',
+            result: errors.InvalidArgument.customizeDescription('Search ' +
+                'param contains unknown attribute: whatever'),
+        },
+        {
+            it: 'should disallow a chained search with one invalid ' +
+                'column attribute',
+            searchParams: '`x-amz-meta-dog`="labrador" ' +
+                'AND `x-amz-meta-age`="5" ' +
+                'OR madeUp="something"' +
+                'OR `x-amz-meta-whatever`="ok"',
+            result: errors.InvalidArgument.customizeDescription('Search ' +
+                'param contains unknown attribute: madeUp'),
+        },
+    ];
+
+    tests.forEach(test => {
+        it(test.it, () => {
+            const actualResult =
+                validateSearch(test.searchParams);
+            if (test.result === undefined) {
+                assert(typeof actualResult.ast === 'object');
+            } else {
+                assert.deepStrictEqual(actualResult.error, test.result);
+            }
+        });
+    });
+});