Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| williamlardier | 59f946742c | add logs | 2024-05-16 17:15:52 +02:00 |
56 changed files with 8862 additions and 1008 deletions


@@ -20,16 +20,13 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Render and test ${{ matrix.tests.name }}
uses: scality/action-prom-render-test@1.0.3
uses: scality/action-prom-render-test@1.0.1
with:
alert_file_path: monitoring/alerts.yaml
test_file_path: ${{ matrix.tests.file }}
alert_inputs: |
namespace=zenko
service=artesca-data-connector-s3api-metrics
reportJob=artesca-data-ops-report-handler
replicas=3
alert_inputs: >-
namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
github_token: ${{ secrets.GITHUB_TOKEN }}


@@ -67,8 +67,7 @@ env:
ENABLE_LOCAL_CACHE: "true"
REPORT_TOKEN: "report-token-1"
REMOTE_MANAGEMENT_DISABLE: "1"
# https://github.com/git-lfs/git-lfs/issues/5749
GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs:
linting-coverage:
runs-on: ubuntu-latest
@@ -150,9 +149,6 @@ jobs:
provenance: false
tags: |
ghcr.io/${{ github.repository }}:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=cloudserver
cache-to: type=gha,mode=max,scope=cloudserver
- name: Build and push pykmip image
@@ -162,9 +158,6 @@ jobs:
context: .github/pykmip
tags: |
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=pykmip
cache-to: type=gha,mode=max,scope=pykmip
- name: Build and push MongoDB
@@ -344,7 +337,7 @@ jobs:
needs: build
env:
ENABLE_UTAPI_V2: t
S3BACKEND: mem
S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}

README.md

@@ -1,7 +1,10 @@
# Zenko CloudServer with Vitastor Backend
# Zenko CloudServer
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
[![Twitter Follow][badgetwitter]](https://twitter.com/zenko)
## Overview
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@@ -11,71 +14,137 @@ Scality's Open Source Multi-Cloud Data Controller.
CloudServer provides a single AWS S3 API interface to access multiple
backend data storage systems, both on-premise and in the public cloud.
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
backend support.
CloudServer is useful for developers, either to run as part of a
continuous integration test environment to emulate the AWS S3 service locally
or as an abstraction layer to develop object storage-enabled
applications on the go.
## Quick Start with Vitastor
## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
The Vitastor backend is still experimental, but you can already try to
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs);
that works too 😊.
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
Installation instructions:
## Docker
### Install Vitastor
[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
## Contributing
### Install Zenko with Vitastor Backend
In order to contribute, please follow the
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
- Install dependencies: `npm install --omit=dev` or just `npm install`
- Clone the Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build the Vitastor node.js binding by running `npm install` in the `node-binding` subdirectory of the Vitastor repository.
You need `node-gyp` and `vitastor-client-dev` (the Vitastor client library) for it to succeed.
- Symlink the Vitastor module into Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor` (the full sequence is sketched below)
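For convenience, here is the same sequence as a single shell sketch; the checkout paths (`~/zenko`, `~/vitastor`) are illustrative placeholders, and the commands simply restate the steps above.

```shell
# Clone the Zenko CloudServer fork and install its dependencies
git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor ~/zenko
cd ~/zenko && npm install --omit=dev

# Clone Vitastor and build the node.js binding
# (requires node-gyp and the vitastor-client-dev package)
git clone https://git.yourcmc.ru/vitalif/vitastor ~/vitastor
cd ~/vitastor/node-binding && npm install

# Make the binding visible to Zenko as the "vitastor" module
ln -s ~/vitastor/node-binding ~/zenko/node_modules/vitastor
```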
## Installation
### Install and Configure MongoDB
### Dependencies
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
Building and running the Zenko CloudServer requires node.js 10.x and
yarn v1.17.x. Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).
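A quick way to verify the toolchain before building (a sanity check sketch; exact patch versions will vary):

```shell
node --version   # expect v10.x
yarn --version   # expect 1.17.x
```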
### Setup Zenko
### Clone source code
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
- Copy `authdata.json.example` to `authdata.json` - this is where you set (and can later adjust)
the S3 access and secret keys. Scality seems to use a separate auth service, "Scality Vault", for
access keys, but it's not published, so let's use a file for now.
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set the Vitastor cluster access data.
Put the correct values for `pool_id` (the pool ID from the second step) and `metadata_image` (from the third step)
in this file.
Note: in this version `locationConfig.json` corresponds to storage classes (like STANDARD, COLD, etc.)
instead of "locations" (zones like us-east-1), as it was in the original Zenko CloudServer. (The cluster-side setup commands are sketched below.)
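The cluster-side preparation can be condensed into the following sketch; the pool and image names mirror the steps above, while the `config.json`, `authdata.json`, and `locationConfig.json` edits still have to be done by hand:

```shell
# Create a data pool for S3 objects and note its pool_id
vitastor-cli create-pool s3-data
vitastor-cli ls-pools --detail s3-data

# In another pool, create the 10 GiB volume-metadata image
vitastor-cli create -s 10G s3-volume-meta

# Start from the shipped templates, then edit them as described above
cp config.json.vitastor config.json
cp authdata.json.example authdata.json
cp locationConfig.json.vitastor locationConfig.json
```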
### Start Zenko
Start the S3 server with: `node index.js`
If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
Now you can access your S3 with `s3cmd` or `geesefs`:
```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
```

```shell
git clone https://github.com/scality/S3.git
```

```
AWS_ACCESS_KEY_ID=accessKey1 \
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
geesefs --endpoint http://localhost:8000 testbucket mountdir
```
### Install js dependencies
Go to the ./S3 folder,
```shell
yarn install --frozen-lockfile
```
# Author & License
If you get an error regarding installation of the diskUsage module,
please install g++.
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
If you get an error regarding level-down bindings, try clearing your yarn cache:
```shell
yarn cache clean
```
## Run it with a file backend
```shell
yarn start
```
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
The default access key is accessKey1 with
a secret key of verySecretKey1.
By default the metadata files will be saved in the
localMetadata directory and the data files will be saved
in the localData directory within the ./S3 directory on your
machine. These directories have been pre-created within the
repository. If you would like to save the data or metadata in
different locations of your choice, you must specify them with absolute paths.
So, when starting the server:
```shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
yarn start
```
## Run it with multiple data backends
```shell
export S3DATA='multiple'
yarn start
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
With multiple backends, you have the ability to
choose where each object will be saved by setting
the following header with a locationConstraint on
a PUT request:
```shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
```
If no header is sent with a PUT object request, the
location constraint of the bucket will determine
where the data is saved. If the bucket has no location
constraint, the endpoint of the PUT request will be
used to determine location.
See the Configuration section in our documentation
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
to learn how to set location constraints.
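For instance, with the AWS CLI the same header can be supplied through `--metadata`, since user metadata keys are sent as `x-amz-meta-*` headers; the bucket, key, and location names below are placeholders:

```shell
aws --endpoint-url http://localhost:8000 s3api put-object \
    --bucket mybucket --key mykey --body ./myfile \
    --metadata scal-location-constraint=myLocationConstraint
```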
## Run it with an in-memory backend
```shell
yarn run mem_backend
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
## Run it with Vault user management
Note: Vault is proprietary and must be accessed separately.
```shell
export S3VAULT=vault
yarn start
```
This starts a Zenko CloudServer using Vault for user management.
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae

bin/metrics_server.js (new executable file)

@@ -0,0 +1,46 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});
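Since the script takes all of its settings from the environment, a run might look like the following sketch (the endpoint, instance ID, and token values are placeholders):

```shell
PUSH_ENDPOINT=https://push.api.zenko.io/api/v1/instance \
INSTANCE_ID=my-instance-uuid \
MANAGEMENT_TOKEN=my-management-token \
node bin/metrics_server.js
```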

bin/secure_channel_proxy.js (new executable file)

@@ -0,0 +1,46 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});


@@ -4,7 +4,6 @@
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"workers": 4,
"restEndpoints": {
"localhost": "us-east-1",
"127.0.0.1": "us-east-1",
@@ -102,14 +101,6 @@
"readPreference": "primary",
"database": "metadata"
},
"authdata": "authdata.json",
"backends": {
"auth": "file",
"data": "file",
"metadata": "mongodb",
"kms": "file",
"quota": "none"
},
"externalBackends": {
"aws_s3": {
"httpAgent": {


@@ -1,71 +0,0 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "STANDARD",
"127.0.0.1": "STANDARD",
"yourhostname.ru": "STANDARD"
},
"websiteEndpoints": [
"static.yourhostname.ru"
],
"replicationEndpoints": [ {
"site": "zenko",
"servers": ["127.0.0.1:8000"],
"default": true
} ],
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"backends": {
"metadata": "mongodb"
},
"mongodb": {
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "s3",
"authCredentials": {
"username": "s3",
"password": ""
}
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}


@@ -116,7 +116,7 @@ const constants = {
],
// user metadata header to set object locationConstraint
objectLocationConstraintHeader: 'x-amz-storage-class',
objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
legacyLocations: ['sproxyd', 'legacy'],
// declare here all existing service accounts and their properties
@@ -205,6 +205,9 @@ const constants = {
],
allowedUtapiEventFilterStates: ['allow', 'deny'],
allowedRestoreObjectRequestTierValues: ['Standard'],
validStorageClasses: [
'STANDARD',
],
lifecycleListing: {
CURRENT_TYPE: 'current',
NON_CURRENT_TYPE: 'noncurrent',


@@ -1,10 +1,10 @@
'use strict'; // eslint-disable-line strict
require('werelogs').stderrUtils.catchAndTimestampStderr(
undefined,
// Do not exit, as workers have their own listener that will exit,
// but the primary doesn't have another listener
require('cluster').isPrimary ? 1 : null,
);
/**
* Catch uncaught exceptions and add timestamp to aid debugging
*/
process.on('uncaughtException', err => {
process.stderr.write(`${new Date().toISOString()}: Uncaught exception: \n${err.stack}`);
});
require('./lib/server.js')();


@@ -107,47 +107,6 @@ function parseSproxydConfig(configSproxyd) {
return joi.attempt(configSproxyd, joiSchema, 'bad config');
}
function parseRedisConfig(redisConfig) {
const joiSchema = joi.object({
password: joi.string().allow(''),
host: joi.string(),
port: joi.number(),
retry: joi.object({
connectBackoff: joi.object({
min: joi.number().required(),
max: joi.number().required(),
jitter: joi.number().required(),
factor: joi.number().required(),
deadline: joi.number().required(),
}),
}),
// sentinel config
sentinels: joi.alternatives().try(
joi.string()
.pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
.custom(hosts => hosts.split(',').map(item => {
const [host, port] = item.split(':');
return { host, port: Number.parseInt(port, 10) };
})),
joi.array().items(
joi.object({
host: joi.string().required(),
port: joi.number().required(),
})
).min(1),
),
name: joi.string(),
sentinelPassword: joi.string().allow(''),
})
.and('host', 'port')
.and('sentinels', 'name')
.xor('host', 'sentinels')
.without('sentinels', ['host', 'port'])
.without('host', ['sentinels', 'sentinelPassword']);
return joi.attempt(redisConfig, joiSchema, 'bad config');
}
function restEndpointsAssert(restEndpoints, locationConstraints) {
assert(typeof restEndpoints === 'object',
'bad config: restEndpoints must be an object of endpoints');
@@ -377,7 +336,7 @@ function dmfLocationConstraintAssert(locationObj) {
function locationConstraintAssert(locationConstraints) {
const supportedBackends =
['mem', 'file', 'scality',
'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends));
'mongodb', 'dmf', 'azure_archive'].concat(Object.keys(validExternalBackends));
assert(typeof locationConstraints === 'object',
'bad config: locationConstraints must be an object');
Object.keys(locationConstraints).forEach(l => {
@@ -502,23 +461,27 @@ function locationConstraintAssert(locationConstraints) {
locationConstraints[l].details.connector.hdclient);
}
});
assert(Object.keys(locationConstraints)
.includes('us-east-1'), 'bad locationConfig: must ' +
'include us-east-1 as a locationConstraint');
}
function parseUtapiReindex(config) {
const {
enabled,
schedule,
redis,
sentinel,
bucketd,
onlyCountLatestWhenObjectLocked,
} = config;
assert(typeof enabled === 'boolean',
'bad config: utapi.reindex.enabled must be a boolean');
const parsedRedis = parseRedisConfig(redis);
assert(Array.isArray(parsedRedis.sentinels),
'bad config: utapi reindex redis config requires a list of sentinels');
'bad config: utapi.reindex.enabled must be a boolean');
assert(typeof sentinel === 'object',
'bad config: utapi.reindex.sentinel must be an object');
assert(typeof sentinel.port === 'number',
'bad config: utapi.reindex.sentinel.port must be a number');
assert(typeof sentinel.name === 'string',
'bad config: utapi.reindex.sentinel.name must be a string');
assert(typeof bucketd === 'object',
'bad config: utapi.reindex.bucketd must be an object');
assert(typeof bucketd.port === 'number',
@@ -536,13 +499,6 @@ function parseUtapiReindex(config) {
'bad config: utapi.reindex.schedule must be a valid ' +
`cron schedule. ${e.message}.`);
}
return {
enabled,
schedule,
redis: parsedRedis,
bucketd,
onlyCountLatestWhenObjectLocked,
};
}
function requestsConfigAssert(requestsConfig) {
@@ -630,6 +586,7 @@ class Config extends EventEmitter {
// Read config automatically
this._getLocationConfig();
this._getConfig();
this._configureBackends();
}
_getLocationConfig() {
@@ -841,11 +798,11 @@ class Config extends EventEmitter {
this.websiteEndpoints = config.websiteEndpoints;
}
this.workers = false;
if (config.workers !== undefined) {
assert(Number.isInteger(config.workers) && config.workers > 0,
'bad config: workers must be a positive integer');
this.workers = config.workers;
this.clusters = false;
if (config.clusters !== undefined) {
assert(Number.isInteger(config.clusters) && config.clusters > 0,
'bad config: clusters must be a positive integer');
this.clusters = config.clusters;
}
if (config.usEastBehavior !== undefined) {
@@ -1083,7 +1040,8 @@ class Config extends EventEmitter {
assert(typeof config.localCache.port === 'number',
'config: bad port for localCache. port must be a number');
if (config.localCache.password !== undefined) {
assert(typeof config.localCache.password === 'string',
assert(
this._verifyRedisPassword(config.localCache.password),
'config: bad password for localCache. password must' +
' be a string');
}
@@ -1109,7 +1067,55 @@ class Config extends EventEmitter {
}
if (config.redis) {
this.redis = parseRedisConfig(config.redis);
if (config.redis.sentinels) {
this.redis = { sentinels: [], name: null };
assert(typeof config.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.redis.name = config.redis.name;
assert(Array.isArray(config.redis.sentinels) ||
typeof config.redis.sentinels === 'string',
'bad config: redis sentinels must be an array or string');
if (typeof config.redis.sentinels === 'string') {
config.redis.sentinels.split(',').forEach(item => {
const [host, port] = item.split(':');
this.redis.sentinels.push({ host,
port: Number.parseInt(port, 10) });
});
} else if (Array.isArray(config.redis.sentinels)) {
config.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.redis.sentinels.push({ host, port });
});
}
if (config.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(config.redis.sentinelPassword));
this.redis.sentinelPassword = config.redis.sentinelPassword;
}
} else {
// check for standalone configuration
this.redis = {};
assert(typeof config.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.redis.port === 'number',
'bad config: redis.port must be a number');
this.redis.host = config.redis.host;
this.redis.port = config.redis.port;
}
if (config.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.redis.password),
'bad config: invalid password for redis. password must ' +
'be a string');
this.redis.password = config.redis.password;
}
}
if (config.scuba) {
this.scuba = {};
@@ -1177,8 +1183,50 @@ class Config extends EventEmitter {
assert(config.redis, 'missing required property of utapi ' +
'configuration: redis');
if (config.utapi.redis) {
this.utapi.redis = parseRedisConfig(config.utapi.redis);
if (this.utapi.redis.retry === undefined) {
if (config.utapi.redis.sentinels) {
this.utapi.redis = { sentinels: [], name: null };
assert(typeof config.utapi.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.utapi.redis.name = config.utapi.redis.name;
assert(Array.isArray(config.utapi.redis.sentinels),
'bad config: redis sentinels must be an array');
config.utapi.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.utapi.redis.sentinels.push({ host, port });
});
} else {
// check for standalone configuration
this.utapi.redis = {};
assert(typeof config.utapi.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.utapi.redis.port === 'number',
'bad config: redis.port must be a number');
this.utapi.redis.host = config.utapi.redis.host;
this.utapi.redis.port = config.utapi.redis.port;
}
if (config.utapi.redis.retry !== undefined) {
if (config.utapi.redis.retry.connectBackoff !== undefined) {
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
assert.strictEqual(typeof min, 'number',
'utapi.redis.retry.connectBackoff: min must be a number');
assert.strictEqual(typeof max, 'number',
'utapi.redis.retry.connectBackoff: max must be a number');
assert.strictEqual(typeof jitter, 'number',
'utapi.redis.retry.connectBackoff: jitter must be a number');
assert.strictEqual(typeof factor, 'number',
'utapi.redis.retry.connectBackoff: factor must be a number');
assert.strictEqual(typeof deadline, 'number',
'utapi.redis.retry.connectBackoff: deadline must be a number');
}
this.utapi.redis.retry = config.utapi.redis.retry;
} else {
this.utapi.redis.retry = {
connectBackoff: {
min: 10,
@@ -1189,6 +1237,22 @@ class Config extends EventEmitter {
},
};
}
if (config.utapi.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.utapi.redis.password),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.password = config.utapi.redis.password;
}
if (config.utapi.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(
config.utapi.redis.sentinelPassword),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.sentinelPassword =
config.utapi.redis.sentinelPassword;
}
}
if (config.utapi.metrics) {
this.utapi.metrics = config.utapi.metrics;
@@ -1258,7 +1322,8 @@ class Config extends EventEmitter {
}
if (config.utapi && config.utapi.reindex) {
this.utapi.reindex = parseUtapiReindex(config.utapi.reindex);
parseUtapiReindex(config.utapi.reindex);
this.utapi.reindex = config.utapi.reindex;
}
}
@@ -1303,8 +1368,6 @@ class Config extends EventEmitter {
}
}
this.authdata = config.authdata || 'authdata.json';
this.kms = {};
if (config.kms) {
assert(typeof config.kms.userName === 'string');
@@ -1524,6 +1587,25 @@ class Config extends EventEmitter {
this.outboundProxy.certs = certObj.certs;
}
this.managementAgent = {};
this.managementAgent.port = 8010;
this.managementAgent.host = 'localhost';
if (config.managementAgent !== undefined) {
if (config.managementAgent.port !== undefined) {
assert(Number.isInteger(config.managementAgent.port)
&& config.managementAgent.port > 0,
'bad config: managementAgent port must be a positive ' +
'integer');
this.managementAgent.port = config.managementAgent.port;
}
if (config.managementAgent.host !== undefined) {
assert.strictEqual(typeof config.managementAgent.host, 'string',
'bad config: management agent host must ' +
'be a string');
this.managementAgent.host = config.managementAgent.host;
}
}
// Ephemeral token to protect the reporting endpoint:
// try inherited from parent first, then hardcoded in conf file,
// then create a fresh one as last resort.
@@ -1613,8 +1695,6 @@ class Config extends EventEmitter {
'bad config: maxScannedLifecycleListingEntries must be greater than 2');
this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
}
this._configureBackends(config);
}
_setTimeOptions() {
@@ -1653,43 +1733,41 @@
}
_getAuthData() {
return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' }));
return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
}
_configureBackends(config) {
const backends = config.backends || {};
_configureBackends() {
/**
* Configure the backends for Authentication, Data and Metadata.
*/
let auth = backends.auth || 'mem';
let data = backends.data || 'multiple';
let metadata = backends.metadata || 'file';
let kms = backends.kms || 'file';
let quota = backends.quota || 'none';
let auth = 'mem';
let data = 'multiple';
let metadata = 'file';
let kms = 'file';
let quota = 'none';
if (process.env.S3BACKEND) {
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
'bad environment variable: S3BACKEND environment variable ' +
'should be one of mem/file/scality/cdmi'
);
auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem';
auth = process.env.S3BACKEND;
data = process.env.S3BACKEND;
metadata = process.env.S3BACKEND;
kms = process.env.S3BACKEND;
}
if (process.env.S3VAULT) {
auth = process.env.S3VAULT;
auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
}
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
// Auth only checks for 'mem' since mem === file
auth = 'mem';
let authData;
if (process.env.SCALITY_ACCESS_KEY_ID &&
process.env.SCALITY_SECRET_ACCESS_KEY) {
process.env.SCALITY_SECRET_ACCESS_KEY) {
authData = buildAuthDataAccount(
process.env.SCALITY_ACCESS_KEY_ID,
process.env.SCALITY_SECRET_ACCESS_KEY);
process.env.SCALITY_ACCESS_KEY_ID,
process.env.SCALITY_SECRET_ACCESS_KEY);
} else {
authData = this._getAuthData();
}
@@ -1697,7 +1775,7 @@ class Config extends EventEmitter {
throw new Error('bad config: invalid auth config file.');
}
this.authData = authData;
} else if (auth === 'multiple') {
} else if (auth === 'multiple') {
const authData = this._getAuthData();
if (validateAuthConfig(authData)) {
throw new Error('bad config: invalid auth config file.');
@@ -1712,9 +1790,9 @@
'should be one of mem/file/scality/multiple'
);
data = process.env.S3DATA;
if (data === 'scality' || data === 'multiple') {
data = 'multiple';
}
}
if (data === 'scality' || data === 'multiple') {
data = 'multiple';
}
assert(this.locationConstraints !== undefined &&
this.restEndpoints !== undefined,
@@ -1739,6 +1817,10 @@
};
}
_verifyRedisPassword(password) {
return typeof password === 'string';
}
setAuthDataAccounts(accounts) {
this.authData.accounts = accounts;
this.emit('authdata-update');
@@ -1873,7 +1955,6 @@ class Config extends EventEmitter {
module.exports = {
parseSproxydConfig,
parseRedisConfig,
locationConstraintAssert,
ConfigObject: Config,
config: new Config(),


@@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
apiMethod, 's3');
}
if (apiMethod === 'bucketPut') {
if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
return null;
}
@@ -65,17 +65,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const requestContexts = [];
if (apiMethod === 'multiObjectDelete') {
// MultiObjectDelete does not require any authorization when evaluating
// the API. Instead, we authorize each object passed.
// But in order to get any relevant information from the authorization service
// for example, the account quota, we must send a request context object
// with no `specificResource`. We expect the result to be an implicit deny.
// In the API, we then ignore these authorization results, and we can use
// any information returned, e.g., the quota.
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
requestContexts.push(requestContextMultiObjectDelete);
} else if (apiMethodAfterVersionCheck === 'objectCopy'
if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet';


@@ -131,8 +131,8 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
const actions = Array.isArray(mdValueParams.requestType) ?
mdValueParams.requestType : [mdValueParams.requestType];
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
false, log, err => next(err, bucketMD, objectMD));
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes, log,
err => next(err, bucketMD, objectMD));
},
function updateObjectMD(bucketMD, objectMD, next) {
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};


@@ -189,14 +189,12 @@ function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
* @param {array} apiNames - action names: operations to authorize
* @param {string} apiMethod - the main API call
* @param {number} inflight - inflight bytes
* @param {boolean} isStorageReserved - Flag to check if the current quota, minus
* the incoming bytes, are under the limit.
* @param {Logger} log - logger
* @param {function} callback - callback function
* @returns {boolean} - true if the quota is valid, false otherwise
*/
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, log, callback) {
if (!config.isQuotaEnabled() || !inflight) {
return callback(null);
}
let type;
@@ -230,11 +228,6 @@ function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight,
return callback(null);
}
if (isStorageReserved) {
// eslint-disable-next-line no-param-reassign
inflight = 0;
}
return async.forEach(apiNames, (apiName, done) => {
// Object copy operations first check the target object,
// meaning the source object, containing the current bytes,
@@ -255,8 +248,7 @@ function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight,
let _inflights = shouldSendInflights ? inflight : undefined;
const inflightForCheck = shouldSendInflights ? 0 : inflight;
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
inflightForCheck, apiName, log,
(err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
inflightForCheck, apiName, log, (err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
if (err) {
return done(err);
}


@@ -45,8 +45,9 @@ function checkLocationConstraint(request, locationConstraint, log) {
} else if (parsedHost && restEndpoints[parsedHost]) {
locationConstraintChecked = restEndpoints[parsedHost];
} else {
locationConstraintChecked = Object.keys(locationConstraints)[0];
log.trace('no location constraint provided on bucket put; setting ' + locationConstraintChecked);
log.trace('no location constraint provided on bucket put;' +
'setting us-east-1');
locationConstraintChecked = 'us-east-1';
}
if (!locationConstraints[locationConstraintChecked]) {


@@ -94,6 +94,9 @@ function completeMultipartUpload(authInfo, request, log, callback) {
let versionId;
const putVersionId = request.headers['x-scal-s3-version-id'];
console.log('completeMPU > request.headers', request.headers);
const isPutVersion = putVersionId || putVersionId === '';
if (putVersionId) {
const decodedVidResult = decodeVID(putVersionId);


@@ -6,7 +6,6 @@ const convertToXml = s3middleware.convertToXml;
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { hasNonPrintables } = require('../utilities/stringChecks');
const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const constants = require('../../constants');
const services = require('../services');
@@ -51,6 +50,8 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
const bucketName = request.bucketName;
const objectKey = request.objectKey;
console.log('initiateMPU > request.headers', request.headers);
if (hasNonPrintables(objectKey)) {
return callback(errors.InvalidInput.customizeDescription(
'object keys cannot contain non-printable characters',
@@ -66,7 +67,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
const websiteRedirectHeader =
request.headers['x-amz-website-redirect-location'];
if (request.headers['x-amz-storage-class'] &&
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', bucketName,
errors.InvalidStorageClass.code, 'initiateMultipartUpload');


@@ -335,7 +335,7 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
},
(objMD, versionId, callback) => validateQuotas(
request, bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
-objMD?.['content-length'] || 0, false, log, err => callback(err, objMD, versionId)),
-objMD?.['content-length'] || 0, log, err => callback(err, objMD, versionId)),
(objMD, versionId, callback) => {
const options = preprocessingVersioningDelete(
bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
@@ -508,9 +508,8 @@ function multiObjectDelete(authInfo, request, log, callback) {
if (bucketShield(bucketMD, 'objectDelete')) {
return next(errors.NoSuchBucket);
}
// The implicit deny flag is ignored in the DeleteObjects API, as authorization only
// affects the objects.
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request,
request.actionImplicitDenies)) {
log.trace("access denied due to bucket acl's");
// if access denied at the bucket level, no access for
// any of the objects so all results will be error results

View File

@@ -249,7 +249,7 @@ function objectCopy(authInfo, request, sourceBucket,
const responseHeaders = {};
if (request.headers['x-amz-storage-class'] &&
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', destBucketName,
errors.InvalidStorageClass.code, 'copyObject');


@@ -3,7 +3,6 @@ const { errors, versioning } = require('arsenal');
const constants = require('../../constants');
const aclUtils = require('../utilities/aclUtils');
const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@@ -72,7 +71,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
query,
} = request;
if (headers['x-amz-storage-class'] &&
!config.locationConstraints[headers['x-amz-storage-class']]) {
!constants.validStorageClasses.includes(headers['x-amz-storage-class'])) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', request.bucketName,
errors.InvalidStorageClass.code, 'putObject');
@@ -99,7 +98,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
'The encryption method specified is not supported');
const requestType = request.apiMethods || 'objectPut';
const valParams = { authInfo, bucketName, objectKey, versionId,
requestType, request, withVersionId: isPutVersion };
requestType, request };
const canonicalID = authInfo.getCanonicalID();
if (hasNonPrintables(objectKey)) {


@@ -199,7 +199,7 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
copyObjectSize, sourceVerId,
sourceLocationConstraintName, sourceObjMD, next) {
return validateQuotas(request, destBucketMD, request.accountQuotas, valPutParams.requestType,
request.apiMethod, sourceObjMD?.['content-length'] || 0, false, log, err =>
request.apiMethod, sourceObjMD?.['content-length'] || 0, log, err =>
next(err, dataLocator, destBucketMD, copyObjectSize, sourceVerId, sourceLocationConstraintName));
},
// get MPU shadow bucket to get splitter based on MD version


@@ -61,8 +61,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
log.debug('processing request', { method: 'objectPutPart' });
const size = request.parsedContentLength;
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
console.log('objectPutPart > request.headers', request.headers);
if (Number.parseInt(size, 10) > constants.maximumAllowedPartSize) {
log.debug('put part size too large', { size });
@@ -137,7 +136,7 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
return next(null, destinationBucket);
},
(destinationBucket, next) => validateQuotas(request, destinationBucket, request.accountQuotas,
requestType, request.apiMethod, size, isPutVersion, log, err => next(err, destinationBucket)),
requestType, request.apiMethod, size, log, err => next(err, destinationBucket)),
// Get bucket server-side encryption, if it exists.
(destinationBucket, next) => getObjectSSEConfiguration(
request.headers, destinationBucket, log,


@@ -1,3 +1,4 @@
const vaultclient = require('vaultclient');
const { auth } = require('arsenal');
const { config } = require('../Config');
@@ -20,7 +21,6 @@ function getVaultClient(config) {
port,
https: true,
});
const vaultclient = require('vaultclient');
vaultClient = new vaultclient.Client(host, port, true, key, cert, ca);
} else {
logger.info('vaultclient configuration', {
@@ -28,7 +28,6 @@ function getVaultClient(config) {
port,
https: false,
});
const vaultclient = require('vaultclient');
vaultClient = new vaultclient.Client(host, port);
}
@@ -50,6 +49,10 @@ function getMemBackend(config) {
}
switch (config.backends.auth) {
case 'mem':
implName = 'vaultMem';
client = getMemBackend(config);
break;
case 'multiple':
implName = 'vaultChain';
client = new ChainBackend('s3', [
@@ -57,14 +60,9 @@ case 'multiple':
getVaultClient(config),
]);
break;
case 'vault':
default: // vault
implName = 'vault';
client = getVaultClient(config);
break;
default: // mem
implName = 'vaultMem';
client = getMemBackend(config);
break;
}
module.exports = new Vault(client, implName);


@@ -8,6 +8,20 @@ const inMemory = require('./in_memory/backend').backend;
const file = require('./file/backend');
const KMIPClient = require('arsenal').network.kmipClient;
const Common = require('./common');
let scalityKMS;
let scalityKMSImpl;
try {
// eslint-disable-next-line import/no-unresolved
const ScalityKMS = require('scality-kms');
scalityKMS = new ScalityKMS(config.kms);
scalityKMSImpl = 'scalityKms';
} catch (error) {
logger.warn('scality kms unavailable. ' +
'Using file kms backend unless mem specified.',
{ error });
scalityKMS = file;
scalityKMSImpl = 'fileKms';
}
let client;
let implName;
@@ -19,9 +33,8 @@ if (config.backends.kms === 'mem') {
client = file;
implName = 'fileKms';
} else if (config.backends.kms === 'scality') {
const ScalityKMS = require('scality-kms');
client = new ScalityKMS(config.kms);
implName = 'scalityKms';
client = scalityKMS;
implName = scalityKMSImpl;
} else if (config.backends.kms === 'kmip') {
const kmipConfig = { kmip: config.kmip };
if (!kmipConfig.kmip) {


@@ -0,0 +1,131 @@
/**
* Target service that should handle a message
* @readonly
* @enum {number}
*/
const MessageType = {
/** Message that contains a configuration overlay */
CONFIG_OVERLAY_MESSAGE: 1,
/** Message that requests a metrics report */
METRICS_REQUEST_MESSAGE: 2,
/** Message that contains a metrics report */
METRICS_REPORT_MESSAGE: 3,
/** Close the virtual TCP socket associated to the channel */
CHANNEL_CLOSE_MESSAGE: 4,
/** Write data to the virtual TCP socket associated to the channel */
CHANNEL_PAYLOAD_MESSAGE: 5,
};
/**
* Target service that should handle a message
* @readonly
* @enum {number}
*/
const TargetType = {
/** Let the dispatcher choose the most appropriate message */
TARGET_ANY: 0,
};
const headerSize = 3;
class ChannelMessageV0 {
/**
* @param {Buffer} buffer Message bytes
*/
constructor(buffer) {
this.messageType = buffer.readUInt8(0);
this.channelNumber = buffer.readUInt8(1);
this.target = buffer.readUInt8(2);
this.payload = buffer.slice(headerSize);
}
/**
* @returns {number} Message type
*/
getType() {
return this.messageType;
}
/**
* @returns {number} Channel number if applicable
*/
getChannelNumber() {
return this.channelNumber;
}
/**
* @returns {number} Target service, or 0 to choose automatically
*/
getTarget() {
return this.target;
}
/**
* @returns {Buffer} Message payload if applicable
*/
getPayload() {
return this.payload;
}
/**
* Creates a wire representation of a channel close message
*
* @param {number} channelId Channel number
*
* @returns {Buffer} wire representation
*/
static encodeChannelCloseMessage(channelId) {
const buf = Buffer.alloc(headerSize);
buf.writeUInt8(MessageType.CHANNEL_CLOSE_MESSAGE, 0);
buf.writeUInt8(channelId, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
return buf;
}
/**
* Creates a wire representation of a channel data message
*
* @param {number} channelId Channel number
* @param {Buffer} data Payload
*
* @returns {Buffer} wire representation
*/
static encodeChannelDataMessage(channelId, data) {
const buf = Buffer.alloc(data.length + headerSize);
buf.writeUInt8(MessageType.CHANNEL_PAYLOAD_MESSAGE, 0);
buf.writeUInt8(channelId, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
data.copy(buf, headerSize);
return buf;
}
/**
* Creates a wire representation of a metrics message
*
* @param {object} body Metrics report
*
* @returns {Buffer} wire representation
*/
static encodeMetricsReportMessage(body) {
const report = JSON.stringify(body);
const buf = Buffer.alloc(report.length + headerSize);
buf.writeUInt8(MessageType.METRICS_REPORT_MESSAGE, 0);
buf.writeUInt8(0, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
buf.write(report, headerSize);
return buf;
}
/**
* Protocol name used for subprotocol negotiation
*/
static get protocolName() {
return 'zenko-secure-channel-v0';
}
}
module.exports = {
ChannelMessageV0,
MessageType,
TargetType,
};


@@ -0,0 +1,94 @@
const WebSocket = require('ws');
const arsenal = require('arsenal');
const logger = require('../utilities/logger');
const _config = require('../Config').config;
const { patchConfiguration } = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;
const managementAgentMessageType = {
/** Message that contains the loaded overlay */
NEW_OVERLAY: 1,
};
const CONNECTION_RETRY_TIMEOUT_MS = 5000;
function initManagementClient() {
const { host, port } = _config.managementAgent;
const ws = new WebSocket(`ws://${host}:${port}/watch`);
ws.on('open', () => {
logger.info('connected with management agent');
});
ws.on('close', (code, reason) => {
logger.info('disconnected from management agent', { reason });
setTimeout(initManagementClient, CONNECTION_RETRY_TIMEOUT_MS);
});
ws.on('error', error => {
logger.error('error on connection with management agent', { error });
});
ws.on('message', data => {
const method = 'initManagementclient::onMessage';
const log = logger.newRequestLogger();
let msg;
if (!data) {
log.error('message without data', { method });
return;
}
try {
msg = JSON.parse(data);
} catch (err) {
log.error('data is an invalid json', { method, err, data });
return;
}
if (msg.payload === undefined) {
log.error('message without payload', { method });
return;
}
if (typeof msg.messageType !== 'number') {
log.error('messageType is not an integer', {
type: typeof msg.messageType,
method,
});
return;
}
switch (msg.messageType) {
case managementAgentMessageType.NEW_OVERLAY:
patchConfiguration(msg.payload, log, err => {
if (err) {
log.error('failed to patch overlay', {
error: reshapeExceptionError(err),
method,
});
}
});
return;
default:
log.error('new overlay message with unmanaged message type', {
method,
type: msg.messageType,
});
return;
}
});
}
function isManagementAgentUsed() {
return process.env.MANAGEMENT_USE_AGENT === '1';
}
module.exports = {
managementAgentMessageType,
initManagementClient,
isManagementAgentUsed,
};
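Because `isManagementAgentUsed()` only tests the `MANAGEMENT_USE_AGENT` environment variable, switching CloudServer to the agent-based flow is just a matter of exporting it before startup (a sketch):

```shell
export MANAGEMENT_USE_AGENT=1
yarn start
```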


@@ -0,0 +1,240 @@
const arsenal = require('arsenal');
const { buildAuthDataAccount } = require('../auth/in_memory/builder');
const _config = require('../Config').config;
const metadata = require('../metadata/wrapper');
const { getStoredCredentials } = require('./credentials');
const latestOverlayVersionKey = 'configuration/overlay-version';
const managementDatabaseName = 'PENSIEVE';
const replicatorEndpoint = 'zenko-cloudserver-replicator';
const { decryptSecret } = arsenal.pensieve.credentialUtils;
const { patchLocations } = arsenal.patches.locationConstraints;
const { reshapeExceptionError } = arsenal.errorUtils;
const { replicationBackends } = require('arsenal').constants;
function overlayHasVersion(overlay) {
return overlay && overlay.version !== undefined;
}
function remoteOverlayIsNewer(cachedOverlay, remoteOverlay) {
return (overlayHasVersion(remoteOverlay) &&
(!overlayHasVersion(cachedOverlay) ||
remoteOverlay.version > cachedOverlay.version));
}
/**
* Updates the live {Config} object with the new overlay configuration.
*
* No-op if this version was already applied to the live {Config}.
*
* @param {object} newConf Overlay configuration to apply
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} cb Function to call with (error, newConf)
*
* @returns {undefined}
*/
function patchConfiguration(newConf, log, cb) {
if (newConf.version === undefined) {
log.debug('no remote configuration created yet');
return process.nextTick(cb, null, newConf);
}
if (_config.overlayVersion !== undefined &&
newConf.version <= _config.overlayVersion) {
log.debug('configuration version already applied',
{ configurationVersion: newConf.version });
return process.nextTick(cb, null, newConf);
}
return getStoredCredentials(log, (err, creds) => {
if (err) {
return cb(err);
}
const accounts = [];
if (newConf.users) {
newConf.users.forEach(u => {
if (u.secretKey && u.secretKey.length > 0) {
const secretKey = decryptSecret(creds, u.secretKey);
// accountType will be service-replication or service-clueso
let serviceName;
if (u.accountType && u.accountType.startsWith('service-')) {
serviceName = u.accountType.split('-')[1];
}
const newAccount = buildAuthDataAccount(
u.accessKey, secretKey, u.canonicalId, serviceName,
u.userName);
accounts.push(newAccount.accounts[0]);
}
});
}
const restEndpoints = Object.assign({}, _config.restEndpoints);
if (newConf.endpoints) {
newConf.endpoints.forEach(e => {
restEndpoints[e.hostname] = e.locationName;
});
}
if (!restEndpoints[replicatorEndpoint]) {
restEndpoints[replicatorEndpoint] = 'us-east-1';
}
const locations = patchLocations(newConf.locations, creds, log);
if (Object.keys(locations).length !== 0) {
try {
_config.setLocationConstraints(locations);
} catch (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not apply configuration version location ' +
'constraints', { error: exceptionError,
method: 'getStoredCredentials' });
return cb(exceptionError);
}
try {
const locationsWithReplicationBackend = Object.keys(locations)
// NOTE: In Orbit, we don't need to have Scality location in our
// replication endpoint config, since we do not replicate to
// any Scality instance yet.
.filter(key => replicationBackends
[locations[key].type])
.reduce((obj, key) => {
/* eslint no-param-reassign:0 */
obj[key] = locations[key];
return obj;
}, {});
_config.setReplicationEndpoints(
locationsWithReplicationBackend);
} catch (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not apply replication endpoints',
{ error: exceptionError, method: 'getStoredCredentials' });
return cb(exceptionError);
}
}
_config.setAuthDataAccounts(accounts);
_config.setRestEndpoints(restEndpoints);
_config.setPublicInstanceId(newConf.instanceId);
if (newConf.browserAccess) {
if (Boolean(_config.browserAccessEnabled) !==
Boolean(newConf.browserAccess.enabled)) {
_config.browserAccessEnabled =
Boolean(newConf.browserAccess.enabled);
_config.emit('browser-access-enabled-change');
}
}
_config.overlayVersion = newConf.version;
log.info('applied configuration version',
{ configurationVersion: _config.overlayVersion });
return cb(null, newConf);
});
}
/**
* Writes configuration version to the management database
*
* @param {object} cachedOverlay Latest stored configuration version
* for freshness comparison purposes
* @param {object} remoteOverlay New configuration version
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} cb Function to call with (error, remoteOverlay)
*
* @returns {undefined}
*/
function saveConfigurationVersion(cachedOverlay, remoteOverlay, log, cb) {
if (remoteOverlayIsNewer(cachedOverlay, remoteOverlay)) {
const objName = `configuration/overlay/${remoteOverlay.version}`;
metadata.putObjectMD(managementDatabaseName, objName, remoteOverlay,
{}, log, error => {
if (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not save configuration',
{ error: exceptionError,
method: 'saveConfigurationVersion',
configurationVersion: remoteOverlay.version });
cb(exceptionError);
return;
}
metadata.putObjectMD(managementDatabaseName,
latestOverlayVersionKey, remoteOverlay.version, {}, log,
error => {
if (error) {
log.error('could not save configuration version', {
configurationVersion: remoteOverlay.version,
});
}
cb(error, remoteOverlay);
});
});
} else {
log.debug('no remote configuration to cache yet');
process.nextTick(cb, null, remoteOverlay);
}
}
/**
* Loads the latest cached configuration overlay from the management
* database, without contacting the Orbit API.
*
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} callback Function called with (error, cachedOverlay)
*
* @returns {undefined}
*/
function loadCachedOverlay(log, callback) {
return metadata.getObjectMD(managementDatabaseName,
latestOverlayVersionKey, {}, log, (err, version) => {
if (err) {
if (err.is.NoSuchKey) {
return process.nextTick(callback, null, {});
}
return callback(err);
}
return metadata.getObjectMD(managementDatabaseName,
`configuration/overlay/${version}`, {}, log, (err, conf) => {
if (err) {
if (err.is.NoSuchKey) {
return process.nextTick(callback, null, {});
}
return callback(err);
}
return callback(null, conf);
});
});
}
function applyAndSaveOverlay(overlay, log) {
patchConfiguration(overlay, log, err => {
if (err) {
log.error('could not apply pushed overlay', {
error: reshapeExceptionError(err),
method: 'applyAndSaveOverlay',
});
return;
}
saveConfigurationVersion(null, overlay, log, err => {
if (err) {
log.error('could not cache overlay version', {
error: reshapeExceptionError(err),
method: 'applyAndSaveOverlay',
});
return;
}
log.info('overlay push processed');
});
});
}
module.exports = {
loadCachedOverlay,
managementDatabaseName,
patchConfiguration,
saveConfigurationVersion,
remoteOverlayIsNewer,
applyAndSaveOverlay,
};


@@ -0,0 +1,145 @@
const arsenal = require('arsenal');
const forge = require('node-forge');
const request = require('../utilities/request');
const metadata = require('../metadata/wrapper');
const managementDatabaseName = 'PENSIEVE';
const tokenConfigurationKey = 'auth/zenko/remote-management-token';
const tokenRotationDelay = 3600 * 24 * 7 * 1000; // 7 days
const { reshapeExceptionError } = arsenal.errorUtils;
/**
* Retrieves Orbit API token from the management database.
*
* The token is used to authenticate stat posting and other requests to the Orbit management API.
*
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function called with (error, result)
*
* @returns {undefined}
*/
function getStoredCredentials(log, callback) {
metadata.getObjectMD(managementDatabaseName, tokenConfigurationKey, {},
log, callback);
}
function issueCredentials(managementEndpoint, instanceId, log, callback) {
log.info('registering with API to get token');
const keyPair = forge.pki.rsa.generateKeyPair({ bits: 2048, e: 0x10001 });
const privateKey = forge.pki.privateKeyToPem(keyPair.privateKey);
const publicKey = forge.pki.publicKeyToPem(keyPair.publicKey);
const postData = {
publicKey,
};
request.post(`${managementEndpoint}/${instanceId}/register`,
{ body: postData, json: true }, (error, response, body) => {
if (error) {
return callback(error);
}
if (response.statusCode !== 201) {
log.error('could not register instance', {
statusCode: response.statusCode,
});
return callback(arsenal.errors.InternalError);
}
/* eslint-disable no-param-reassign */
body.privateKey = privateKey;
/* eslint-enable no-param-reassign */
return callback(null, body);
});
}
function confirmInstanceCredentials(
managementEndpoint, instanceId, creds, log, callback) {
const postData = {
serial: creds.serial || 0,
publicKey: creds.publicKey,
};
const opts = {
headers: {
'x-instance-authentication-token': creds.token,
},
body: postData,
};
request.post(`${managementEndpoint}/${instanceId}/confirm`,
opts, (error, response) => {
if (error) {
return callback(error);
}
if (response.statusCode === 200) {
return callback(null, instanceId, creds.token);
}
return callback(arsenal.errors.InternalError);
});
}
/**
* Initializes credentials and PKI in the management database.
*
* In case the management database is new and empty, the instance
* is registered as new against the Orbit API with newly-generated
* RSA key pair.
*
* @param {string} managementEndpoint API endpoint
* @param {string} instanceId UUID of this deployment
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function called with (error, result)
*
* @returns {undefined}
*/
function initManagementCredentials(
managementEndpoint, instanceId, log, callback) {
getStoredCredentials(log, (error, value) => {
if (error) {
if (error.is.NoSuchKey) {
return issueCredentials(managementEndpoint, instanceId, log,
(error, value) => {
if (error) {
log.error('could not issue token',
{ error: reshapeExceptionError(error),
method: 'initManagementCredentials' });
return callback(error);
}
log.debug('saving token');
return metadata.putObjectMD(managementDatabaseName,
tokenConfigurationKey, value, {}, log, error => {
if (error) {
log.error('could not save token',
{ error: reshapeExceptionError(error),
method: 'initManagementCredentials',
});
return callback(error);
}
log.info('saved token locally, ' +
'confirming instance');
return confirmInstanceCredentials(
managementEndpoint, instanceId, value, log,
callback);
});
});
}
log.debug('could not get token', { error });
return callback(error);
}
log.info('returning existing token');
if (Date.now() - value.issueDate > tokenRotationDelay) {
log.warn('management API token is too old, should re-issue');
}
return callback(null, instanceId, value.token);
});
}
module.exports = {
getStoredCredentials,
initManagementCredentials,
};

lib/management/index.js (new file)

@@ -0,0 +1,138 @@
const arsenal = require('arsenal');
const async = require('async');
const metadata = require('../metadata/wrapper');
const logger = require('../utilities/logger');
const {
loadCachedOverlay,
managementDatabaseName,
patchConfiguration,
} = require('./configuration');
const { initManagementCredentials } = require('./credentials');
const { startWSManagementClient } = require('./push');
const { startPollingManagementClient } = require('./poll');
const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');
const initRemoteManagementRetryDelay = 10000;
const managementEndpointRoot =
process.env.MANAGEMENT_ENDPOINT ||
'https://api.zenko.io';
const managementEndpoint = `${managementEndpointRoot}/api/v1/instance`;
const pushEndpointRoot =
process.env.PUSH_ENDPOINT ||
'https://push.api.zenko.io';
const pushEndpoint = `${pushEndpointRoot}/api/v1/instance`;
function initManagementDatabase(log, callback) {
// XXX choose proper owner names
const md = new arsenal.models.BucketInfo(managementDatabaseName, 'owner',
'owner display name', new Date().toJSON());
metadata.createBucket(managementDatabaseName, md, log, error => {
if (error) {
if (error.is.BucketAlreadyExists) {
log.info('management database already exists');
return callback();
}
log.error('could not initialize management database',
{ error: reshapeExceptionError(error),
method: 'initManagementDatabase' });
return callback(error);
}
log.info('initialized management database');
return callback();
});
}
function startManagementListeners(instanceId, token) {
const mode = process.env.MANAGEMENT_MODE || 'push';
if (mode === 'push') {
const url = `${pushEndpoint}/${instanceId}/ws`;
startWSManagementClient(url, token);
} else {
startPollingManagementClient(managementEndpoint, instanceId, token);
}
}
/**
* Initializes Orbit-based management by:
* - creating the management database in metadata
* - generating a key pair for credentials encryption
* - generating an instance-unique ID
* - getting an authentication token for the API
* - loading and applying the latest cached overlay configuration
* - starting a configuration update and metrics push background task
*
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function to call once the overlay is loaded
* (overlay)
*
* @returns {undefined}
*/
function initManagement(log, callback) {
if ((process.env.REMOTE_MANAGEMENT_DISABLE &&
process.env.REMOTE_MANAGEMENT_DISABLE !== '0')
|| process.env.S3BACKEND === 'mem') {
log.info('remote management disabled');
return;
}
/* Temporary check until we fully move to the process management agent. */
if (isManagementAgentUsed() ^ typeof callback === 'function') {
let msg = 'misuse of initManagement function: ';
msg += `MANAGEMENT_USE_AGENT: ${process.env.MANAGEMENT_USE_AGENT}`;
msg += `, callback type: ${typeof callback}`;
throw new Error(msg);
}
async.waterfall([
// eslint-disable-next-line arrow-body-style
cb => { return isManagementAgentUsed() ? metadata.setup(cb) : cb(); },
cb => initManagementDatabase(log, cb),
cb => metadata.getUUID(log, cb),
(instanceId, cb) => initManagementCredentials(
managementEndpoint, instanceId, log, cb),
(instanceId, token, cb) => {
if (!isManagementAgentUsed()) {
cb(null, instanceId, token, {});
return;
}
loadCachedOverlay(log, (err, overlay) => cb(err, instanceId,
token, overlay));
},
(instanceId, token, overlay, cb) => {
if (!isManagementAgentUsed()) {
cb(null, instanceId, token, overlay);
return;
}
patchConfiguration(overlay, log,
err => cb(err, instanceId, token, overlay));
},
], (error, instanceId, token, overlay) => {
if (error) {
log.error('could not initialize remote management, retrying later',
{ error: reshapeExceptionError(error),
method: 'initManagement' });
setTimeout(initManagement,
initRemoteManagementRetryDelay,
logger.newRequestLogger());
} else {
log.info(`this deployment's Instance ID is ${instanceId}`);
log.end('management init done');
startManagementListeners(instanceId, token);
if (callback) {
callback(overlay);
}
}
});
}
module.exports = {
initManagement,
initManagementDatabase,
};
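A minimal sketch of how this entry point is invoked, mirroring the calls added to lib/server.js and managementAgent.js later in this diff:
// Without the management agent, no callback is passed:
const logger = require('./lib/utilities/logger'); // path from the repo root
const { initManagement } = require('./lib/management');
initManagement(logger.newRequestLogger());

// With MANAGEMENT_USE_AGENT set, a callback receiving the loaded overlay is
// required (the XOR check above enforces this pairing):
// initManagement(logger.newRequestLogger(), overlay => { /* use overlay */ });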

157
lib/management/poll.js Normal file

@ -0,0 +1,157 @@
const arsenal = require('arsenal');
const async = require('async');
const request = require('../utilities/request');
const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');
const {
loadCachedOverlay,
patchConfiguration,
saveConfigurationVersion,
} = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;
const pushReportDelay = 30000;
const pullConfigurationOverlayDelay = 60000;
function loadRemoteOverlay(
managementEndpoint, instanceId, remoteToken, cachedOverlay, log, cb) {
log.debug('loading remote overlay');
const opts = {
headers: {
'x-instance-authentication-token': remoteToken,
'x-scal-request-id': log.getSerializedUids(),
},
json: true,
};
request.get(`${managementEndpoint}/${instanceId}/config/overlay`, opts,
(error, response, body) => {
if (error) {
return cb(error);
}
if (response.statusCode === 200) {
return cb(null, cachedOverlay, body);
}
if (response.statusCode === 404) {
return cb(null, cachedOverlay, {});
}
return cb(arsenal.errors.AccessForbidden, cachedOverlay, {});
});
}
// TODO save only after successful patch
function applyConfigurationOverlay(
managementEndpoint, instanceId, remoteToken, log) {
async.waterfall([
wcb => loadCachedOverlay(log, wcb),
(cachedOverlay, wcb) => patchConfiguration(cachedOverlay,
log, wcb),
(cachedOverlay, wcb) =>
loadRemoteOverlay(managementEndpoint, instanceId, remoteToken,
cachedOverlay, log, wcb),
(cachedOverlay, remoteOverlay, wcb) =>
saveConfigurationVersion(cachedOverlay, remoteOverlay, log, wcb),
(remoteOverlay, wcb) => patchConfiguration(remoteOverlay,
log, wcb),
], error => {
if (error) {
log.error('could not apply managed configuration',
{ error: reshapeExceptionError(error),
method: 'applyConfigurationOverlay' });
}
setTimeout(applyConfigurationOverlay, pullConfigurationOverlayDelay,
managementEndpoint, instanceId, remoteToken,
logger.newRequestLogger());
});
}
function postStats(managementEndpoint, instanceId, remoteToken, report, next) {
const toURL = `${managementEndpoint}/${instanceId}/stats`;
const toOptions = {
json: true,
headers: {
'content-type': 'application/json',
'x-instance-authentication-token': remoteToken,
},
body: report,
};
const toCallback = (err, response, body) => {
if (err) {
logger.info('could not post stats', { error: err });
}
if (response && response.statusCode !== 201) {
logger.info('could not post stats', {
body,
statusCode: response.statusCode,
});
}
if (next) {
next(null, instanceId, remoteToken);
}
};
return request.post(toURL, toOptions, toCallback);
}
function getStats(next) {
const fromURL = `http://localhost:${_config.port}/_/report`;
const fromOptions = {
headers: {
'x-scal-report-token': process.env.REPORT_TOKEN,
},
};
return request.get(fromURL, fromOptions, next);
}
function pushStats(managementEndpoint, instanceId, remoteToken, next) {
if (process.env.PUSH_STATS === 'false') {
return;
}
getStats((err, res, report) => {
if (err) {
logger.info('could not retrieve stats', { error: err });
return;
}
logger.debug('report', { report });
postStats(
managementEndpoint,
instanceId,
remoteToken,
report,
next
);
return;
});
setTimeout(pushStats, pushReportDelay,
managementEndpoint, instanceId, remoteToken);
}
/**
* Starts background task that updates configuration and pushes stats.
*
* Periodically polls for configuration updates, and pushes stats at
* a fixed interval.
*
* @param {string} managementEndpoint API endpoint
* @param {string} instanceId UUID of this deployment
* @param {string} remoteToken API authentication token
*
* @returns {undefined}
*/
function startPollingManagementClient(
managementEndpoint, instanceId, remoteToken) {
metadata.notifyBucketChange(() => {
pushStats(managementEndpoint, instanceId, remoteToken);
});
pushStats(managementEndpoint, instanceId, remoteToken);
applyConfigurationOverlay(managementEndpoint, instanceId, remoteToken,
logger.newRequestLogger());
}
module.exports = {
startPollingManagementClient,
};
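As an aside, the internal report endpoint that getStats polls can be exercised directly. A sketch, assuming CloudServer listens on its default port 8000 and REPORT_TOKEN matches the server's configured token:
const http = require('http');

http.get({
    host: 'localhost',
    port: 8000, // assumed CloudServer port
    path: '/_/report',
    headers: { 'x-scal-report-token': process.env.REPORT_TOKEN },
}, res => {
    let body = '';
    res.on('data', chunk => { body += chunk; });
    res.on('end', () => console.log(res.statusCode, `${body.length} bytes`));
}).on('error', err => console.error('report request failed', err));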

301
lib/management/push.js Normal file

@ -0,0 +1,301 @@
const arsenal = require('arsenal');
const HttpsProxyAgent = require('https-proxy-agent');
const net = require('net');
const request = require('../utilities/request');
const { URL } = require('url');
const WebSocket = require('ws');
const assert = require('assert');
const http = require('http');
const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');
const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');
const { applyAndSaveOverlay } = require('./configuration');
const {
ChannelMessageV0,
MessageType,
} = require('./ChannelMessageV0');
const {
CONFIG_OVERLAY_MESSAGE,
METRICS_REQUEST_MESSAGE,
CHANNEL_CLOSE_MESSAGE,
CHANNEL_PAYLOAD_MESSAGE,
} = MessageType;
const PING_INTERVAL_MS = 10000;
const subprotocols = [ChannelMessageV0.protocolName];
const cloudServerHost = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_HOST
|| 'localhost';
const cloudServerPort = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_PORT
|| _config.port;
let overlayMessageListener = null;
let connected = false;
// No wildcard or CIDR/mask matching for now
function createWSAgent(pushEndpoint, env, log) {
const url = new URL(pushEndpoint);
const noProxy = (env.NO_PROXY || env.no_proxy
|| '').split(',');
if (noProxy.includes(url.hostname)) {
log.info('push server ws has proxy exclusion', { noProxy });
return null;
}
if (url.protocol === 'https:' || url.protocol === 'wss:') {
const httpsProxy = (env.HTTPS_PROXY || env.https_proxy);
if (httpsProxy) {
log.info('push server ws using https proxy', { httpsProxy });
return new HttpsProxyAgent(httpsProxy);
}
} else if (url.protocol === 'http:' || url.protocol === 'ws:') {
const httpProxy = (env.HTTP_PROXY || env.http_proxy);
if (httpProxy) {
log.info('push server ws using http proxy', { httpProxy });
return new HttpsProxyAgent(httpProxy);
}
}
const allProxy = (env.ALL_PROXY || env.all_proxy);
if (allProxy) {
log.info('push server ws using wildcard proxy', { allProxy });
return new HttpsProxyAgent(allProxy);
}
log.info('push server ws not using proxy');
return null;
}
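The precedence implemented above is: NO_PROXY exclusion first, then HTTPS_PROXY or HTTP_PROXY depending on the URL scheme, then ALL_PROXY as a catch-all. An illustrative sketch (the URL and proxy values are made up; logger is the module-level logger already in scope, and the same behavior is covered by the unit tests near the end of this diff):
// Host excluded via NO_PROXY: returns null, so the client connects directly.
const direct = createWSAgent('wss://push.api.zenko.io/api/v1/instance/id/ws',
    { NO_PROXY: 'push.api.zenko.io' }, logger);
// direct === null

// Secure scheme (wss:) with HTTPS_PROXY set: returns an HttpsProxyAgent.
const proxied = createWSAgent('wss://push.api.zenko.io/api/v1/instance/id/ws',
    { HTTPS_PROXY: 'http://proxy:3128' }, logger);
// proxied.proxy.href === 'http://proxy:3128/'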
/**
* Starts background task that updates configuration and pushes stats.
*
* Receives pushed WebSocket messages on configuration updates, and
* sends stat messages in response to API solicitations.
*
* @param {string} url API endpoint
* @param {string} token API authentication token
* @param {function} cb end-of-connection callback
*
* @returns {undefined}
*/
function startWSManagementClient(url, token, cb) {
logger.info('connecting to push server', { url });
function _logError(error, errorMessage, method) {
if (error) {
logger.error(`management client error: ${errorMessage}`,
{ error: reshapeExceptionError(error), method });
}
}
const socketsByChannelId = [];
const headers = {
'x-instance-authentication-token': token,
};
const agent = createWSAgent(url, process.env, logger);
const ws = new WebSocket(url, subprotocols, { headers, agent });
let pingTimeout = null;
function sendPing() {
if (ws.readyState === ws.OPEN) {
ws.ping(err => _logError(err, 'failed to send a ping', 'sendPing'));
}
pingTimeout = setTimeout(() => ws.terminate(), PING_INTERVAL_MS);
}
function initiatePing() {
clearTimeout(pingTimeout);
setTimeout(sendPing, PING_INTERVAL_MS);
}
function pushStats(options) {
if (process.env.PUSH_STATS === 'false') {
return;
}
const fromURL = `http://${cloudServerHost}:${cloudServerPort}/_/report`;
const fromOptions = {
json: true,
headers: {
'x-scal-report-token': process.env.REPORT_TOKEN,
'x-scal-report-skip-cache': Boolean(options && options.noCache),
},
};
request.get(fromURL, fromOptions, (err, response, body) => {
if (err) {
_logError(err, 'failed to get metrics report', 'pushStats');
return;
}
ws.send(ChannelMessageV0.encodeMetricsReportMessage(body),
err => _logError(err, 'failed to send metrics report message',
'pushStats'));
});
}
function closeChannel(channelId) {
const socket = socketsByChannelId[channelId];
if (socket) {
socket.destroy();
delete socketsByChannelId[channelId];
}
}
function receiveChannelData(channelId, payload) {
let socket = socketsByChannelId[channelId];
if (!socket) {
socket = net.createConnection(cloudServerPort, cloudServerHost);
socket.on('data', data => {
ws.send(ChannelMessageV0.
encodeChannelDataMessage(channelId, data), err =>
_logError(err, 'failed to send channel data message',
'receiveChannelData'));
});
socket.on('connect', () => {
});
socket.on('drain', () => {
});
socket.on('error', error => {
logger.error('failed to connect to S3', {
code: error.code,
host: error.address,
port: error.port,
});
});
socket.on('end', () => {
socket.destroy();
socketsByChannelId[channelId] = null;
ws.send(ChannelMessageV0.encodeChannelCloseMessage(channelId),
err => _logError(err,
'failed to send channel close message',
'receiveChannelData'));
});
socketsByChannelId[channelId] = socket;
}
socket.write(payload);
}
function browserAccessChangeHandler() {
if (!_config.browserAccessEnabled) {
socketsByChannelId.forEach(s => s && s.destroy());
}
}
ws.on('open', () => {
connected = true;
logger.info('connected to push server');
metadata.notifyBucketChange(() => {
pushStats({ noCache: true });
});
_config.on('browser-access-enabled-change', browserAccessChangeHandler);
initiatePing();
});
const cbOnce = cb ? arsenal.jsutil.once(cb) : null;
ws.on('close', () => {
logger.info('disconnected from push server, reconnecting in 10s');
metadata.notifyBucketChange(null);
_config.removeListener('browser-access-enabled-change',
browserAccessChangeHandler);
setTimeout(startWSManagementClient, 10000, url, token);
connected = false;
if (cbOnce) {
process.nextTick(cbOnce);
}
});
ws.on('error', err => {
connected = false;
logger.error('error from push server connection', {
error: err,
errorMessage: err.message,
});
if (cbOnce) {
process.nextTick(cbOnce, err);
}
});
ws.on('ping', () => {
ws.pong(err => _logError(err, 'failed to send a pong', 'ping handler'));
});
ws.on('pong', () => {
initiatePing();
});
ws.on('message', data => {
const log = logger.newRequestLogger();
const message = new ChannelMessageV0(data);
switch (message.getType()) {
case CONFIG_OVERLAY_MESSAGE:
if (!isManagementAgentUsed()) {
applyAndSaveOverlay(JSON.parse(message.getPayload()), log);
} else {
if (overlayMessageListener) {
overlayMessageListener(message.getPayload().toString());
}
}
break;
case METRICS_REQUEST_MESSAGE:
pushStats();
break;
case CHANNEL_CLOSE_MESSAGE:
closeChannel(message.getChannelNumber());
break;
case CHANNEL_PAYLOAD_MESSAGE:
// browserAccessEnabled defaults to true unless explicitly false
if (_config.browserAccessEnabled !== false) {
receiveChannelData(
message.getChannelNumber(), message.getPayload());
}
break;
default:
logger.error('unknown message type from push server',
{ messageType: message.getType() });
}
});
}
function addOverlayMessageListener(callback) {
assert(typeof callback === 'function');
overlayMessageListener = callback;
}
function startPushConnectionHealthCheckServer(cb) {
const server = http.createServer((req, res) => {
if (req.url !== '/_/healthcheck') {
res.writeHead(404);
res.write('Not Found');
} else if (connected) {
res.writeHead(200);
res.write('Connected');
} else {
res.writeHead(503);
res.write('Not Connected');
}
res.end();
});
server.listen(_config.port, cb);
}
module.exports = {
createWSAgent,
startWSManagementClient,
startPushConnectionHealthCheckServer,
addOverlayMessageListener,
};
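To probe the health-check endpoint served above, assuming the process shares the default CloudServer port (8000), a request like this sketch suffices:
const http = require('http');

http.get('http://localhost:8000/_/healthcheck', res => {
    // 200 when the push connection is up, 503 when it is down,
    // 404 for any other path.
    console.log('push healthcheck status:', res.statusCode);
}).on('error', err => console.error('healthcheck failed', err));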


@ -184,7 +184,7 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) {
* @return {undefined} - and call callback with params err, bucket md
*/
function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log, callback) {
const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request, withVersionId } = params;
const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request } = params;
let requestType = params.requestType;
if (!Array.isArray(requestType)) {
requestType = [requestType];
@ -242,16 +242,13 @@ function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log,
const needQuotaCheck = requestType => requestType.some(type => actionNeedQuotaCheck[type] ||
actionWithDataDeletion[type]);
const checkQuota = params.checkQuota === undefined ? needQuotaCheck(requestType) : params.checkQuota;
// withVersionId covers cases when an object is being restored with a specific version ID.
// In this case, the storage space was already accounted for when the RestoreObject API call
// was made, so we don't need to add any inflight, but quota must be evaluated.
if (!checkQuota) {
return next(null, bucket, objMD);
}
const contentLength = processBytesToWrite(request.apiMethod, bucket, versionId,
request?.parsedContentLength || 0, objMD, params.destObjMD);
return validateQuotas(request, bucket, request.accountQuotas, requestType, request.apiMethod,
contentLength, withVersionId, log, err => next(err, bucket, objMD));
contentLength, log, err => next(err, bucket, objMD));
},
], (err, bucket, objMD) => {
if (err) {


@ -2,9 +2,9 @@ const MetadataWrapper = require('arsenal').storage.metadata.MetadataWrapper;
const { config } = require('../Config');
const logger = require('../utilities/logger');
const constants = require('../../constants');
const bucketclient = require('bucketclient');
const clientName = config.backends.metadata;
let bucketclient;
let params;
if (clientName === 'mem') {
params = {};
@ -21,7 +21,6 @@ if (clientName === 'mem') {
noDbOpen: null,
};
} else if (clientName === 'scality') {
bucketclient = require('bucketclient');
params = {
bucketdBootstrap: config.bucketd.bootstrap,
bucketdLog: config.bucketd.log,


@ -18,6 +18,11 @@ const locationStorageCheck =
require('./api/apiUtils/object/locationStorageCheck');
const vault = require('./auth/vault');
const metadata = require('./metadata/wrapper');
const { initManagement } = require('./management');
const {
initManagementClient,
isManagementAgentUsed,
} = require('./management/agentClient');
const HttpAgent = require('agentkeepalive');
const QuotaService = require('./quotas/quotas');
@ -51,6 +56,7 @@ const STATS_INTERVAL = 5; // 5 seconds
const STATS_EXPIRY = 30; // 30 seconds
const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL,
STATS_EXPIRY);
const enableRemoteManagement = true;
class S3Server {
/**
@ -321,13 +327,26 @@ class S3Server {
QuotaService?.setup(log);
}
// TODO this should wait for metadata healthcheck to be ok
// TODO only do this in cluster master
if (enableRemoteManagement) {
if (!isManagementAgentUsed()) {
setTimeout(() => {
initManagement(logger.newRequestLogger());
}, 5000);
} else {
initManagementClient();
}
}
this.started = true;
});
}
}
function main() {
let workers = _config.workers || 1;
// TODO: change config to use workers prop. name for clarity
let workers = _config.clusters || 1;
if (process.env.S3BACKEND === 'mem') {
workers = 1;
}


@ -1,12 +1,16 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const _config = require('../Config').config;
const { utapiVersion, UtapiServer: utapiServer } = require('utapi');
const vault = require('../auth/vault');
// start utapi server
if (utapiVersion === 1 && _config.utapi) {
const fullConfig = Object.assign({}, _config.utapi,
{ redis: _config.redis, vaultclient: vault });
{ redis: _config.redis });
if (_config.vaultd) {
Object.assign(fullConfig, { vaultd: _config.vaultd });
}
if (_config.https) {
Object.assign(fullConfig, { https: _config.https });
}
// copy healthcheck IPs
if (_config.healthChecks) {
Object.assign(fullConfig, { healthChecks: _config.healthChecks });


@ -1,4 +1,3 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const UtapiReindex = require('utapi').UtapiReindex;
const { config } = require('../Config');


@ -1,4 +1,3 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const UtapiReplay = require('utapi').UtapiReplay;
const _config = require('../Config').config;


@ -1,11 +1,7 @@
const { configure, Werelogs } = require('werelogs');
const { Werelogs } = require('werelogs');
const _config = require('../Config.js').config;
configure({
level: _config.log.logLevel,
dump: _config.log.dumpLevel,
});
const werelogs = new Werelogs({
level: _config.log.logLevel,
dump: _config.log.dumpLevel,


@ -1,12 +0,0 @@
{
"STANDARD": {
"type": "vitastor",
"objectId": "std",
"legacyAwsBehavior": true,
"details": {
"config_path": "/etc/vitastor/vitastor.conf",
"pool_id": 3,
"metadata_image": "s3-volume-meta"
}
}
}

179
managementAgent.js Normal file

@ -0,0 +1,179 @@
const Uuid = require('uuid');
const WebSocket = require('ws');
const logger = require('./lib/utilities/logger');
const { initManagement } = require('./lib/management');
const _config = require('./lib/Config').config;
const { managementAgentMessageType } = require('./lib/management/agentClient');
const { addOverlayMessageListener } = require('./lib/management/push');
const { saveConfigurationVersion } = require('./lib/management/configuration');
// TODO: auth?
// TODO: werelogs with a specific name.
const CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS = 15000;
class ManagementAgentServer {
constructor() {
this.port = _config.managementAgent.port || 8010;
this.wss = null;
this.loadedOverlay = null;
this.stop = this.stop.bind(this);
process.on('SIGINT', this.stop);
process.on('SIGHUP', this.stop);
process.on('SIGQUIT', this.stop);
process.on('SIGTERM', this.stop);
process.on('SIGPIPE', () => {});
}
start(_cb) {
const cb = _cb || function noop() {};
/* Define REPORT_TOKEN env variable needed by the management
* module. */
process.env.REPORT_TOKEN = process.env.REPORT_TOKEN
|| _config.reportToken
|| Uuid.v4();
initManagement(logger.newRequestLogger(), overlay => {
let error = null;
if (overlay) {
this.loadedOverlay = overlay;
this.startServer();
} else {
error = new Error('failed to init management');
}
return cb(error);
});
}
stop() {
if (!this.wss) {
process.exit(0);
return;
}
this.wss.close(() => {
logger.info('server shutdown');
process.exit(0);
});
}
startServer() {
this.wss = new WebSocket.Server({
port: this.port,
clientTracking: true,
path: '/watch',
});
this.wss.on('connection', this.onConnection.bind(this));
this.wss.on('listening', this.onListening.bind(this));
this.wss.on('error', this.onError.bind(this));
setInterval(this.checkBrokenConnections.bind(this),
CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS);
addOverlayMessageListener(this.onNewOverlay.bind(this));
}
onConnection(socket, request) {
function heartbeat() {
this.isAlive = true;
}
logger.info('client connected to watch route', {
ip: request.connection.remoteAddress,
});
/* eslint-disable no-param-reassign */
socket.isAlive = true;
socket.on('pong', heartbeat.bind(socket));
if (socket.readyState !== socket.OPEN) {
logger.error('client socket not in ready state', {
state: socket.readyState,
client: socket._socket._peername,
});
return;
}
const msg = {
messageType: managementAgentMessageType.NEW_OVERLAY,
payload: this.loadedOverlay,
};
socket.send(JSON.stringify(msg), error => {
if (error) {
logger.error('failed to send remoteOverlay to client', {
error,
client: socket._socket._peername,
});
}
});
}
onListening() {
logger.info('websocket server listening',
{ port: this.port });
}
onError(error) {
logger.error('websocket server error', { error });
}
_sendNewOverlayToClient(client) {
if (client.readyState !== client.OPEN) {
logger.error('client socket not in ready state', {
state: client.readyState,
client: client._socket._peername,
});
return;
}
const msg = {
messageType: managementAgentMessageType.NEW_OVERLAY,
payload: this.loadedOverlay,
};
client.send(JSON.stringify(msg), error => {
if (error) {
logger.error(
'failed to send remoteOverlay to management agent client', {
error, client: client._socket._peername,
});
}
});
}
onNewOverlay(remoteOverlay) {
const remoteOverlayObj = JSON.parse(remoteOverlay);
saveConfigurationVersion(
this.loadedOverlay, remoteOverlayObj, logger, err => {
if (err) {
logger.error('failed to save remote overlay', { err });
return;
}
this.loadedOverlay = remoteOverlayObj;
this.wss.clients.forEach(
this._sendNewOverlayToClient.bind(this)
);
});
}
checkBrokenConnections() {
this.wss.clients.forEach(client => {
if (!client.isAlive) {
logger.info('close broken connection', {
client: client._socket._peername,
});
client.terminate();
return;
}
client.isAlive = false;
client.ping();
});
}
}
const server = new ManagementAgentServer();
server.start();
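For context, a minimal client for the agent's /watch route could look like the sketch below; port 8010 is the default set in the constructor, and the message shape matches the NEW_OVERLAY payload sent by onConnection:
const WebSocket = require('ws');

const ws = new WebSocket('ws://localhost:8010/watch');
ws.on('message', data => {
    const msg = JSON.parse(data);
    // messageType is managementAgentMessageType.NEW_OVERLAY for overlay pushes.
    console.log('overlay version:', msg.payload && msg.payload.version);
});
// The ws library answers server pings with pongs automatically, which keeps
// this client marked alive by checkBrokenConnections above.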


@ -193,123 +193,7 @@ tests:
exp_labels:
severity: critical
# QuotaMetricsNotAvailable (case with bucket quota)
##################################################################################################
- name: Quota metrics not available (bucket quota)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
- series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case with account quota)
##################################################################################################
- name: Quota metrics not available (account quota)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
- series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case with both quotas)
##################################################################################################
- name: Quota metrics not available (both quotas)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
- series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
- series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case without quota)
# QuotaMetricsNotAvailable
##################################################################################################
- name: Utilization service Latency
interval: 1m
@ -322,10 +206,25 @@ tests:
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts: []
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts: []
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []


@ -6,9 +6,6 @@ x-inputs:
- name: service
type: constant
value: artesca-data-connector-s3api-metrics
- name: reportJob
type: constant
value: artesca-data-ops-report-handler
- name: replicas
type: constant
- name: systemErrorsWarningThreshold
@ -143,10 +140,9 @@ groups:
# but not available for at least half of the S3 services during the last minute
- alert: QuotaMetricsNotAvailable
expr: |
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
< ${quotaUnavailabilityThreshold} and
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"}[1m]))
< ${quotaUnavailabilityThreshold}
for: 1m
labels:
severity: warning
annotations:
@ -157,10 +153,8 @@ groups:
# but not available during the last 10 minutes
- alert: QuotaMetricsNotAvailable
expr: |
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
< ${quotaUnavailabilityThreshold} and
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"}[1m]))
< ${quotaUnavailabilityThreshold}
for: 10m
labels:
severity: critical


@ -1,6 +1,6 @@
{
"name": "@zenko/cloudserver",
"version": "8.8.27",
"version": "8.8.23",
"description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol",
"main": "index.js",
"engines": {
@ -21,13 +21,14 @@
"dependencies": {
"@azure/storage-blob": "^12.12.0",
"@hapi/joi": "^17.1.0",
"arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
"async": "^2.5.0",
"aws-sdk": "^2.905.0",
"arsenal": "git+https://github.com/scality/arsenal#8.1.130",
"async": "~2.5.0",
"aws-sdk": "2.905.0",
"bucketclient": "scality/bucketclient#8.1.9",
"bufferutil": "^4.0.6",
"commander": "^2.9.0",
"cron-parser": "^2.11.0",
"diskusage": "^1.1.3",
"diskusage": "1.1.3",
"google-auto-auth": "^0.9.1",
"http-proxy": "^1.17.0",
"http-proxy-agent": "^4.0.1",
@ -37,45 +38,38 @@
"mongodb": "^5.2.0",
"node-fetch": "^2.6.0",
"node-forge": "^0.7.1",
"npm-run-all": "^4.1.5",
"npm-run-all": "~4.1.5",
"prom-client": "14.2.0",
"request": "^2.81.0",
"scubaclient": "git+https://git.yourcmc.ru/vitalif/zenko-scubaclient.git",
"sql-where-parser": "^2.2.1",
"utapi": "git+https://git.yourcmc.ru/vitalif/zenko-utapi.git",
"scubaclient": "git+https://github.com/scality/scubaclient.git",
"sql-where-parser": "~2.2.1",
"utapi": "github:scality/utapi#8.1.13",
"utf-8-validate": "^5.0.8",
"utf8": "^2.1.1",
"utf8": "~2.1.1",
"uuid": "^8.3.2",
"werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
"vaultclient": "scality/vaultclient#8.3.20",
"werelogs": "scality/werelogs#8.1.4",
"ws": "^5.1.0",
"xml2js": "^0.4.16"
},
"overrides": {
"ltgt": "^2.2.0"
"xml2js": "~0.4.16"
},
"devDependencies": {
"@babel/core": "^7.25.2",
"@babel/preset-env": "^7.25.3",
"babel-loader": "^9.1.3",
"bluebird": "^3.3.1",
"eslint": "^8.14.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
"eslint-config-airbnb-base": "^13.1.0",
"eslint-config-scality": "scality/Guidelines#8.2.0",
"eslint-plugin-import": "^2.14.0",
"eslint-plugin-mocha": "^10.1.0",
"express": "^4.17.1",
"ioredis": "^4.9.5",
"istanbul": "^1.0.0-alpha.2",
"istanbul-api": "^1.0.0-alpha.13",
"ioredis": "4.9.5",
"istanbul": "1.0.0-alpha.2",
"istanbul-api": "1.0.0-alpha.13",
"lolex": "^1.4.0",
"mocha": ">=3.1.2",
"mocha": "^2.3.4",
"mocha-junit-reporter": "^1.23.1",
"mocha-multi-reporters": "^1.1.7",
"node-mocks-http": "^1.5.2",
"node-mocks-http": "1.5.2",
"sinon": "^13.0.1",
"tv4": "^1.2.7",
"webpack": "^5.93.0",
"webpack-cli": "^5.1.4"
"tv4": "^1.2.7"
},
"scripts": {
"cloudserver": "S3METADATA=mongodb npm-run-all --parallel start_dataserver start_s3server",


@ -85,25 +85,6 @@ function putObject(bucket, key, size, cb) {
});
}
function putObjectWithCustomHeader(bucket, key, size, vID, cb) {
const request = s3Client.putObject({
Bucket: bucket,
Key: key,
Body: Buffer.alloc(size),
});
request.on('build', () => {
request.httpRequest.headers['x-scal-s3-version-id'] = vID;
});
return request.send((err, data) => {
if (!err && !s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, 0);
}
return cb(err, data);
});
}
function copyObject(bucket, key, sourceSize, cb) {
return s3Client.copyObject({
Bucket: bucket,
@ -699,87 +680,4 @@ function multiObjectDelete(bucket, keys, size, callback) {
},
], done);
});
it('should only evaluate quota and not update inflights for PutObject with the x-scal-s3-version-id header',
done => {
const bucket = 'quota-test-bucket13';
const key = 'quota-test-object';
const size = 100;
let vID = null;
return async.series([
next => createBucket(bucket, true, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, (err, val) => {
assert.ifError(err);
vID = val.VersionId;
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => fakeMetadataArchive(bucket, key, vID, {
archiveInfo: {},
restoreRequestedAt: new Date(0).toISOString(),
restoreRequestedDays: 7,
}, next),
// Simulate the real restore
next => putObjectWithCustomHeader(bucket, key, size, vID, err => {
assert.ifError(err);
return next();
}),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => deleteVersionID(bucket, key, vID, size, next),
next => deleteBucket(bucket, next),
], done);
});
it('should allow a restore if the quota is full but the object fits with its reserved storage space',
done => {
const bucket = 'quota-test-bucket15';
const key = 'quota-test-object';
const size = 1000;
let vID = null;
return async.series([
next => createBucket(bucket, true, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, (err, val) => {
assert.ifError(err);
vID = val.VersionId;
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => fakeMetadataArchive(bucket, key, vID, {
archiveInfo: {},
restoreRequestedAt: new Date(0).toISOString(),
restoreRequestedDays: 7,
}, next),
// Put an object, the quota should be exceeded
next => putObject(bucket, `${key}-2`, size, err => {
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
// Simulate the real restore
next => putObjectWithCustomHeader(bucket, key, size, vID, err => {
assert.ifError(err);
return next();
}),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => deleteVersionID(bucket, key, vID, size, next),
next => deleteBucket(bucket, next),
], done);
});
});


@ -15,15 +15,13 @@ const sourceObject = 'objectsource';
const sourceVersionId = 'vid1';
describe('prepareRequestContexts', () => {
it('should return s3:DeleteObject if multiObjectDelete method', () => {
it('should return null if multiObjectDelete method', () => {
const apiMethod = 'multiObjectDelete';
const request = makeRequest();
const results = prepareRequestContexts(apiMethod, request, sourceBucket,
sourceObject, sourceVersionId);
assert.strictEqual(results.length, 1);
const expectedAction = 's3:DeleteObject';
assert.strictEqual(results[0].getAction(), expectedAction);
assert.strictEqual(results, null);
});
it('should return s3:PutObjectVersion request context action for objectPut method with x-scal-s3-version-id' +


@ -50,7 +50,7 @@ describe('validateQuotas (buckets)', () => {
});
it('should return null if quota is <= 0', done => {
validateQuotas(request, mockBucketNoQuota, {}, [], '', false, false, mockLog, err => {
validateQuotas(request, mockBucketNoQuota, {}, [], '', false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
done();
@ -59,7 +59,7 @@ describe('validateQuotas (buckets)', () => {
it('should return null if scuba is disabled', done => {
QuotaService.enabled = false;
validateQuotas(request, mockBucket, {}, [], '', false, false, mockLog, err => {
validateQuotas(request, mockBucket, {}, [], '', false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
done();
@ -71,7 +71,7 @@ describe('validateQuotas (buckets)', () => {
const error = new Error('Failed to get metrics');
QuotaService._getLatestMetricsCallback.yields(error);
validateQuotas(request, mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
validateQuotas(request, mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
@ -97,7 +97,7 @@ describe('validateQuotas (buckets)', () => {
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.yields(null, result2);
validateQuotas(request, mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
validateQuotas(request, mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
assert.strictEqual(err.is.QuotaExceeded, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1);
assert.strictEqual(request.finalizerHooks.length, 1);
@ -124,7 +124,7 @@ describe('validateQuotas (buckets)', () => {
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {}, ['objectDelete'], 'objectDelete', -50, false, mockLog, err => {
validateQuotas(request, mockBucket, {}, ['objectDelete'], 'objectDelete', -50, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
@ -151,7 +151,7 @@ describe('validateQuotas (buckets)', () => {
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {}, ['objectRestore', 'objectPut'], 'objectRestore',
true, false, mockLog, err => {
true, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
@ -179,7 +179,7 @@ describe('validateQuotas (buckets)', () => {
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {}, ['objectRestore', 'objectPut'], 'objectRestore',
true, false, mockLog, err => {
true, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
@ -194,33 +194,6 @@ describe('validateQuotas (buckets)', () => {
done();
});
});
it('should evaluate the quotas and not update the inflights when isStorageReserved is true', done => {
const result1 = {
bytesTotal: 80,
};
const result2 = {
bytesTotal: 90,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {}, ['objectPut'], 'objectPut',
true, true, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectPut',
inflight: 0,
}
), true);
done();
});
});
});
describe('validateQuotas (with accounts)', () => {
@ -251,7 +224,7 @@ describe('validateQuotas (with accounts)', () => {
validateQuotas(request, mockBucketNoQuota, {
account: 'test_1',
quota: 0,
}, [], '', false, false, mockLog, err => {
}, [], '', false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
done();
@ -262,7 +235,7 @@ describe('validateQuotas (with accounts)', () => {
validateQuotas(request, mockBucketNoQuota, {
account: 'test_1',
quota: 1000,
}, [], '', false, false, mockLog, err => {
}, [], '', false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
done();
@ -274,7 +247,7 @@ describe('validateQuotas (with accounts)', () => {
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, [], '', false, false, mockLog, err => {
}, [], '', false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
done();
@ -289,7 +262,7 @@ describe('validateQuotas (with accounts)', () => {
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
@ -318,7 +291,7 @@ describe('validateQuotas (with accounts)', () => {
validateQuotas(request, mockBucketNoQuota, {
account: 'test_1',
quota: 100,
}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
assert.strictEqual(err.is.QuotaExceeded, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1);
assert.strictEqual(request.finalizerHooks.length, 1);
@ -348,7 +321,7 @@ describe('validateQuotas (with accounts)', () => {
validateQuotas(request, mockBucketNoQuota, {
account: 'test_1',
quota: 1000,
}, ['objectDelete'], 'objectDelete', -50, false, mockLog, err => {
}, ['objectDelete'], 'objectDelete', -50, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
@ -377,7 +350,7 @@ describe('validateQuotas (with accounts)', () => {
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectRestore', 'objectPut'], 'objectRestore', true, false, mockLog, err => {
}, ['objectRestore', 'objectPut'], 'objectRestore', true, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 4);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
@ -406,7 +379,7 @@ describe('validateQuotas (with accounts)', () => {
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
assert.strictEqual(err.is.QuotaExceeded, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 2);
assert.strictEqual(request.finalizerHooks.length, 1);
@ -427,7 +400,7 @@ describe('validateQuotas (with accounts)', () => {
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectRestore', 'objectPut'], 'objectRestore', true, false, mockLog, err => {
}, ['objectRestore', 'objectPut'], 'objectRestore', true, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 4);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
@ -442,35 +415,6 @@ describe('validateQuotas (with accounts)', () => {
done();
});
});
it('should evaluate the quotas and not update the inflights when isStorageReserved is true', done => {
const result1 = {
bytesTotal: 80,
};
const result2 = {
bytesTotal: 90,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectPut'], 'objectPut', true, true, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'account',
'test_1',
null,
{
action: 'objectPut',
inflight: 0,
}
), true);
done();
});
});
});
describe('processBytesToWrite', () => {

View File

@ -1,4 +1,3 @@
const crypto = require('crypto');
const assert = require('assert');
const { errors, storage } = require('arsenal');
@ -8,7 +7,6 @@ const multiObjectDelete = require('../../../lib/api/multiObjectDelete');
const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers');
const DummyRequest = require('../DummyRequest');
const { bucketPut } = require('../../../lib/api/bucketPut');
const metadataWrapper = require('../../../lib/metadata/wrapper');
const objectPut = require('../../../lib/api/objectPut');
const log = new DummyRequestLogger();
@ -27,7 +25,6 @@ const objectKey1 = 'objectName1';
const objectKey2 = 'objectName2';
const metadataUtils = require('../../../lib/metadata/metadataUtils');
const services = require('../../../lib/services');
const { BucketInfo } = require('arsenal/build/lib/models');
const testBucketPutRequest = new DummyRequest({
bucketName,
namespace,
@ -360,43 +357,3 @@ describe('decodeObjectVersion function helper', () => {
assert.deepStrictEqual(ret[1], undefined);
});
});
describe('multiObjectDelete function', () => {
afterEach(() => {
sinon.restore();
});
it('should not authorize the bucket and initial IAM authorization results', done => {
const post = '<Delete><Object><Key>objectname</Key></Object></Delete>';
const request = new DummyRequest({
bucketName: 'bucketname',
objectKey: 'objectname',
parsedHost: 'localhost',
headers: {
'content-md5': crypto.createHash('md5').update(post, 'utf8').digest('base64')
},
post,
url: '/bucketname',
});
const authInfo = makeAuthInfo('123456');
sinon.stub(metadataWrapper, 'getBucket').callsFake((bucketName, log, cb) =>
cb(null, new BucketInfo(
'bucketname',
'123456',
'accountA',
new Date().toISOString(),
15,
)));
multiObjectDelete.multiObjectDelete(authInfo, request, log, (err, res) => {
// Expected result is an access denied on the object, and no error, as the API was authorized
assert.strictEqual(err, null);
assert.strictEqual(
res.includes('<Error><Key>objectname</Key><Code>AccessDenied</Code>'),
true
);
done();
});
});
});


@ -0,0 +1,82 @@
const assert = require('assert');
const {
createWSAgent,
} = require('../../../lib/management/push');
const proxy = 'http://proxy:3128/';
const logger = { info: () => {} };
function testVariableSet(httpProxy, httpsProxy, allProxy, noProxy) {
return () => {
it(`should use ${httpProxy} environment variable`, () => {
let agent = createWSAgent('https://pushserver', {
[httpProxy]: 'http://proxy:3128',
}, logger);
assert.equal(agent, null);
agent = createWSAgent('http://pushserver', {
[httpProxy]: proxy,
}, logger);
assert.equal(agent.proxy.href, proxy);
});
it(`should use ${httpsProxy} environment variable`, () => {
let agent = createWSAgent('http://pushserver', {
[httpsProxy]: proxy,
}, logger);
assert.equal(agent, null);
agent = createWSAgent('https://pushserver', {
[httpsProxy]: proxy,
}, logger);
assert.equal(agent.proxy.href, proxy);
});
it(`should use ${allProxy} environment variable`, () => {
let agent = createWSAgent('http://pushserver', {
[allProxy]: proxy,
}, logger);
assert.equal(agent.proxy.href, proxy);
agent = createWSAgent('https://pushserver', {
[allProxy]: proxy,
}, logger);
assert.equal(agent.proxy.href, proxy);
});
it(`should use ${noProxy} environment variable`, () => {
let agent = createWSAgent('http://pushserver', {
[noProxy]: 'pushserver',
}, logger);
assert.equal(agent, null);
agent = createWSAgent('http://pushserver', {
[noProxy]: 'pushserver',
[httpProxy]: proxy,
}, logger);
assert.equal(agent, null);
agent = createWSAgent('http://pushserver', {
[noProxy]: 'pushserver2',
[httpProxy]: proxy,
}, logger);
assert.equal(agent.proxy.href, proxy);
});
};
}
describe('Websocket connection agent', () => {
describe('with no proxy env', () => {
it('should handle empty proxy environment', () => {
const agent = createWSAgent('https://pushserver', {}, logger);
assert.equal(agent, null);
});
});
describe('with lowercase proxy env',
testVariableSet('http_proxy', 'https_proxy', 'all_proxy', 'no_proxy'));
describe('with uppercase proxy env',
testVariableSet('HTTP_PROXY', 'HTTPS_PROXY', 'ALL_PROXY', 'NO_PROXY'));
});


@ -0,0 +1,239 @@
const assert = require('assert');
const crypto = require('crypto');
const { DummyRequestLogger } = require('../helpers');
const log = new DummyRequestLogger();
const metadata = require('../../../lib/metadata/wrapper');
const managementDatabaseName = 'PENSIEVE';
const tokenConfigurationKey = 'auth/zenko/remote-management-token';
const { privateKey, accessKey, decryptedSecretKey, secretKey, canonicalId,
userName } = require('./resources.json');
const shortid = '123456789012';
const email = 'customaccount1@setbyenv.com';
const arn = 'arn:aws:iam::123456789012:root';
const { config } = require('../../../lib/Config');
const {
remoteOverlayIsNewer,
patchConfiguration,
} = require('../../../lib/management/configuration');
const {
initManagementDatabase,
} = require('../../../lib/management/index');
function initManagementCredentialsMock(cb) {
return metadata.putObjectMD(managementDatabaseName,
tokenConfigurationKey, { privateKey }, {},
log, error => cb(error));
}
function getConfig() {
return config;
}
// Original Config
const overlayVersionOriginal = Object.assign({}, config.overlayVersion);
const authDataOriginal = Object.assign({}, config.authData);
const locationConstraintsOriginal = Object.assign({},
config.locationConstraints);
const restEndpointsOriginal = Object.assign({}, config.restEndpoints);
const browserAccessEnabledOriginal = config.browserAccessEnabled;
const instanceId = '19683e55-56f7-4a4c-98a7-706c07e4ec30';
const publicInstanceId = crypto.createHash('sha256')
.update(instanceId)
.digest('hex');
function resetConfig() {
config.overlayVersion = overlayVersionOriginal;
config.authData = authDataOriginal;
config.locationConstraints = locationConstraintsOriginal;
config.restEndpoints = restEndpointsOriginal;
config.browserAccessEnabled = browserAccessEnabledOriginal;
}
function assertConfig(actualConf, expectedConf) {
Object.keys(expectedConf).forEach(key => {
assert.deepStrictEqual(actualConf[key], expectedConf[key]);
});
}
describe('patchConfiguration', () => {
before(done => initManagementDatabase(log, err => {
if (err) {
return done(err);
}
return initManagementCredentialsMock(done);
}));
beforeEach(() => {
resetConfig();
});
it('should modify config using the new config', done => {
const newConf = {
version: 1,
instanceId,
users: [
{
secretKey,
accessKey,
canonicalId,
userName,
},
],
endpoints: [
{
hostname: '1.1.1.1',
locationName: 'us-east-1',
},
],
locations: {
'us-east-1': {
name: 'us-east-1',
objectId: 'us-east-1',
locationType: 'location-file-v1',
legacyAwsBehavior: true,
details: {},
},
},
browserAccess: {
enabled: true,
},
};
return patchConfiguration(newConf, log, err => {
assert.ifError(err);
const actualConf = getConfig();
const expectedConf = {
overlayVersion: 1,
publicInstanceId,
browserAccessEnabled: true,
authData: {
accounts: [{
name: userName,
email,
arn,
canonicalID: canonicalId,
shortid,
keys: [{
access: accessKey,
secret: decryptedSecretKey,
}],
}],
},
locationConstraints: {
'us-east-1': {
type: 'file',
objectId: 'us-east-1',
legacyAwsBehavior: true,
isTransient: false,
sizeLimitGB: null,
details: { supportsVersioning: true },
name: 'us-east-1',
locationType: 'location-file-v1',
},
},
};
assertConfig(actualConf, expectedConf);
assert.deepStrictEqual(actualConf.restEndpoints['1.1.1.1'],
'us-east-1');
return done();
});
});
it('should apply second configuration if version (2) is greater than ' +
'overlayVersion (1)', done => {
const newConf1 = {
version: 1,
instanceId,
};
const newConf2 = {
version: 2,
instanceId,
browserAccess: {
enabled: true,
},
};
patchConfiguration(newConf1, log, err => {
assert.ifError(err);
return patchConfiguration(newConf2, log, err => {
assert.ifError(err);
const actualConf = getConfig();
const expectedConf = {
overlayVersion: 2,
browserAccessEnabled: true,
};
assertConfig(actualConf, expectedConf);
return done();
});
});
});
it('should not apply the second configuration if version equals ' +
'overlayVersion', done => {
const newConf1 = {
version: 1,
instanceId,
};
const newConf2 = {
version: 1,
instanceId,
browserAccess: {
enabled: true,
},
};
patchConfiguration(newConf1, log, err => {
assert.ifError(err);
return patchConfiguration(newConf2, log, err => {
assert.ifError(err);
const actualConf = getConfig();
const expectedConf = {
overlayVersion: 1,
browserAccessEnabled: undefined,
};
assertConfig(actualConf, expectedConf);
return done();
});
});
});
});
describe('remoteOverlayIsNewer', () => {
it('should return remoteOverlayIsNewer equals false if remote overlay ' +
'is less than the cached', () => {
const cachedOverlay = {
version: 2,
};
const remoteOverlay = {
version: 1,
};
const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
remoteOverlay);
assert.equal(isRemoteOverlayNewer, false);
});
it('should return remoteOverlayIsNewer equals false if remote overlay ' +
'and the cached one are equal', () => {
const cachedOverlay = {
version: 1,
};
const remoteOverlay = {
version: 1,
};
const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
remoteOverlay);
assert.equal(isRemoteOverlayNewer, false);
});
it('should return remoteOverlayIsNewer equals true if remote overlay ' +
'version is greater than the cached one ', () => {
const cachedOverlay = {
version: 0,
};
const remoteOverlay = {
version: 1,
};
const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
remoteOverlay);
assert.equal(isRemoteOverlayNewer, true);
});
});


@ -0,0 +1,9 @@
{
"privateKey": "-----BEGIN RSA PRIVATE KEY-----\r\nMIIEowIBAAKCAQEAj13sSYE40lAX2qpBvfdGfcSVNtBf8i5FH+E8FAhORwwPu+2S\r\n3yBQbgwHq30WWxunGb1NmZL1wkVZ+vf12DtxqFRnMA08LfO4oO6oC4V8XfKeuHyJ\r\n1qlaKRINz6r9yDkTHtwWoBnlAINurlcNKgGD5p7D+G26Chbr/Oo0ZwHula9DxXy6\r\neH8/bJ5/BynyNyyWRPoAO+UkUdY5utkFCUq2dbBIhovMgjjikf5p2oWqnRKXc+JK\r\nBegr6lSHkkhyqNhTmd8+wA+8Cace4sy1ajY1t5V4wfRZea5vwl/HlyyKodvHdxng\r\nJgg6H61JMYPkplY6Gr9OryBKEAgq02zYoYTDfwIDAQABAoIBAAuDYGlavkRteCzw\r\nRU1LIVcSRWVcgIgDXTu9K8T0Ec0008Kkxomyn6LmxmroJbZ1VwsDH8s4eRH73ckA\r\nxrZxt6Pr+0lplq6eBvKtl8MtGhq1VDe+kJczjHEF6SQHOFAu/TEaPZrn2XMcGvRX\r\nO1BnRL9tepFlxm3u/06VRFYNWqqchM+tFyzLu2AuiuKd5+slSX7KZvVgdkY1ErKH\r\ngB75lPyhPb77C/6ptqUisVMSO4JhLhsD0+ekDVY982Sb7KkI+szdWSbtMx9Ek2Wo\r\ntXwJz7I8T7IbODy9aW9G+ydyhMDFmaEYIaDVFKJj5+fluNza3oQ5PtFNVE50GQJA\r\nsisGqfECgYEAwpkwt0KpSamSEH6qknNYPOwxgEuXWoFVzibko7is2tFPvY+YJowb\r\n68MqHIYhf7gHLq2dc5Jg1TTbGqLECjVxp4xLU4c95KBy1J9CPAcuH4xQLDXmeLzP\r\nJ2YgznRocbzAMCDAwafCr3uY9FM7oGDHAi5bE5W11xWx+9MlFExL3JkCgYEAvJp5\r\nf+JGN1W037bQe2QLYUWGszewZsvplnNOeytGQa57w4YdF42lPhMz6Kc/zdzKZpN9\r\njrshiIDhAD5NCno6dwqafBAW9WZl0sn7EnlLhD4Lwm8E9bRHnC9H82yFuqmNrzww\r\nzxBCQogJISwHiVz4EkU48B283ecBn0wT/fAa19cCgYEApKWsnEHgrhy1IxOpCoRh\r\nUhqdv2k1xDPN/8DUjtnAFtwmVcLa/zJopU/Zn4y1ZzSzjwECSTi+iWZRQ/YXXHPf\r\nl92SFjhFW92Niuy8w8FnevXjF6T7PYiy1SkJ9OR1QlZrXc04iiGBDazLu115A7ce\r\nanACS03OLw+CKgl6Q/RR83ECgYBCUngDVoimkMcIHHt3yJiP3ikeAKlRnMdJlsa0\r\nXWVZV4hCG3lDfRXsnEgWuimftNKf+6GdfYSvQdLdiQsCcjT5A4uLsQTByv5nf4uA\r\n1ZKOsFrmRrARzxGXhLDikvj7yP//7USkq+0BBGFhfuAvl7fMhPceyPZPehqB7/jf\r\nxX1LBQKBgAn5GgSXzzS0e06ZlP/VrKxreOHa5Z8wOmqqYQ0QTeczAbNNmuITdwwB\r\nNkbRqpVXRIfuj0BQBegAiix8om1W4it0cwz54IXBwQULxJR1StWxj3jo4QtpMQ+z\r\npVPdB1Ilb9zPV1YvDwRfdS1xsobzznAx56ecsXduZjs9mF61db8Q\r\n-----END RSA PRIVATE KEY-----\r\n",
"publicKey": "-----BEGIN PUBLIC KEY-----\r\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAj13sSYE40lAX2qpBvfdG\r\nfcSVNtBf8i5FH+E8FAhORwwPu+2S3yBQbgwHq30WWxunGb1NmZL1wkVZ+vf12Dtx\r\nqFRnMA08LfO4oO6oC4V8XfKeuHyJ1qlaKRINz6r9yDkTHtwWoBnlAINurlcNKgGD\r\n5p7D+G26Chbr/Oo0ZwHula9DxXy6eH8/bJ5/BynyNyyWRPoAO+UkUdY5utkFCUq2\r\ndbBIhovMgjjikf5p2oWqnRKXc+JKBegr6lSHkkhyqNhTmd8+wA+8Cace4sy1ajY1\r\nt5V4wfRZea5vwl/HlyyKodvHdxngJgg6H61JMYPkplY6Gr9OryBKEAgq02zYoYTD\r\nfwIDAQAB\r\n-----END PUBLIC KEY-----\r\n",
"accessKey": "QXP3VDG3SALNBX2QBJ1C",
"secretKey": "K5FyqZo5uFKfw9QBtn95o6vuPuD0zH/1seIrqPKqGnz8AxALNSx6EeRq7G1I6JJpS1XN13EhnwGn2ipsml3Uf2fQ00YgEmImG8wzGVZm8fWotpVO4ilN4JGyQCah81rNX4wZ9xHqDD7qYR5MyIERxR/osoXfctOwY7GGUjRKJfLOguNUlpaovejg6mZfTvYAiDF+PTO1sKUYqHt1IfKQtsK3dov1EFMBB5pWM7sVfncq/CthKN5M+VHx9Y87qdoP3+7AW+RCBbSDOfQgxvqtS7PIAf10mDl8k2kEURLz+RqChu4O4S0UzbEmtja7wa7WYhYKv/tM/QeW7kyNJMmnPg==",
"decryptedSecretKey": "n7PSZ3U6SgerF9PCNhXYsq3S3fRKVGdZTicGV8Ur",
"canonicalId": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be",
"userName": "orbituser"
}
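
This fixture bundles a PEM key pair with pre-seeded credentials; the field names suggest that secretKey is the encrypted form and decryptedSecretKey the plaintext it decrypts to with the private key above. A hedged usage sketch (the fixture path and the locally running endpoint are assumptions, not taken from the repository):

const creds = require('./fixtures/instance.creds.json'); // hypothetical path
const AWS = require('aws-sdk');

// An account seeded from this fixture signs requests like any other S3 user.
const s3 = new AWS.S3({
    endpoint: 'http://127.0.0.1:8000',
    accessKeyId: creds.accessKey,
    secretAccessKey: creds.decryptedSecretKey,
    s3ForcePathStyle: true,
});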

View File

@ -0,0 +1,43 @@
const assert = require('assert');
const { getCapabilities } = require('../../../lib/utilities/reportHandler');
// Ensures that expected features are enabled even if they
// rely on optional dependencies (such as secureChannelOptimizedPath)
describe('report handler', () => {
it('should report current capabilities', () => {
const c = getCapabilities();
assert.strictEqual(c.locationTypeDigitalOcean, true);
assert.strictEqual(c.locationTypeS3Custom, true);
assert.strictEqual(c.locationTypeSproxyd, true);
assert.strictEqual(c.locationTypeHyperdriveV2, true);
assert.strictEqual(c.locationTypeLocal, true);
assert.strictEqual(c.preferredReadLocation, true);
assert.strictEqual(c.managedLifecycle, true);
assert.strictEqual(c.secureChannelOptimizedPath, true);
assert.strictEqual(c.s3cIngestLocation, true);
});
[
{ value: 'true', result: true },
{ value: 'TRUE', result: true },
{ value: 'tRuE', result: true },
{ value: '1', result: true },
{ value: 'false', result: false },
{ value: 'FALSE', result: false },
{ value: 'FaLsE', result: false },
{ value: '0', result: false },
{ value: 'foo', result: false },
{ value: '', result: true },
{ value: undefined, result: true },
].forEach(param =>
it(`should set the local file system capability for value ${param.value}`, () => {
// Copy the environment: restoring the same object reference would not
// undo changes, and a value left over from a previous case would leak
// into the `undefined` case.
const OLD_ENV = { ...process.env };
delete process.env.LOCAL_VOLUME_CAPABILITY;
if (param.value !== undefined) process.env.LOCAL_VOLUME_CAPABILITY = param.value;
assert.strictEqual(getCapabilities().locationTypeLocal, param.result);
process.env = OLD_ENV;
})
);
});
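
Taken together, the cases imply a permissive boolean parser: the capability defaults to true when LOCAL_VOLUME_CAPABILITY is unset or empty, accepts 'true' case-insensitively as well as '1', and treats any other value as false. A sketch of that predicate (hypothetical helper name; the shipped logic lives in lib/utilities/reportHandler):

function envFlagDefaultTrue(name) {
    const raw = process.env[name];
    if (raw === undefined || raw === '') {
        return true; // enabled unless explicitly turned off
    }
    return raw.toLowerCase() === 'true' || raw === '1';
}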

View File

@ -0,0 +1,72 @@
const assert = require('assert');
const {
ChannelMessageV0,
MessageType,
TargetType,
} = require('../../../lib/management/ChannelMessageV0');
const {
CONFIG_OVERLAY_MESSAGE,
METRICS_REQUEST_MESSAGE,
METRICS_REPORT_MESSAGE,
CHANNEL_CLOSE_MESSAGE,
CHANNEL_PAYLOAD_MESSAGE,
} = MessageType;
const { TARGET_ANY } = TargetType;
describe('ChannelMessageV0', () => {
describe('codec', () => {
it('should roundtrip metrics report', () => {
const b = ChannelMessageV0.encodeMetricsReportMessage({ a: 1 });
const m = new ChannelMessageV0(b);
assert.strictEqual(METRICS_REPORT_MESSAGE, m.getType());
assert.strictEqual(0, m.getChannelNumber());
assert.strictEqual(m.getTarget(), TARGET_ANY);
assert.strictEqual(m.getPayload().toString(), '{"a":1}');
});
it('should roundtrip channel data', () => {
const data = Buffer.from('dummydata'); // Buffer.from() replaces the deprecated new Buffer()
const b = ChannelMessageV0.encodeChannelDataMessage(50, data);
const m = new ChannelMessageV0(b);
assert.strictEqual(CHANNEL_PAYLOAD_MESSAGE, m.getType());
assert.strictEqual(50, m.getChannelNumber());
assert.strictEqual(m.getTarget(), TARGET_ANY);
assert.strictEqual(m.getPayload().toString(), 'dummydata');
});
it('should roundtrip channel close', () => {
const b = ChannelMessageV0.encodeChannelCloseMessage(3);
const m = new ChannelMessageV0(b);
assert.strictEqual(CHANNEL_CLOSE_MESSAGE, m.getType());
assert.strictEqual(3, m.getChannelNumber());
assert.strictEqual(m.getTarget(), TARGET_ANY);
});
});
describe('decoder', () => {
it('should parse metrics request', () => {
const b = Buffer.from([METRICS_REQUEST_MESSAGE, 0, 0]);
const m = new ChannelMessageV0(b);
assert.strictEqual(METRICS_REQUEST_MESSAGE, m.getType());
assert.strictEqual(0, m.getChannelNumber());
assert.strictEqual(m.getTarget(), TARGET_ANY);
});
it('should parse overlay push', () => {
const b = Buffer.from([CONFIG_OVERLAY_MESSAGE, 0, 0, 34, 65, 34]);
const m = new ChannelMessageV0(b);
assert.strictEqual(CONFIG_OVERLAY_MESSAGE, m.getType());
assert.strictEqual(0, m.getChannelNumber());
assert.strictEqual(m.getTarget(), TARGET_ANY);
assert.strictEqual(m.getPayload().toString(), '"A"');
});
});
});
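
The decoder cases fix the wire layout: byte 0 is the message type, byte 1 the channel number, byte 2 the target, and everything from offset 3 on is the payload (so the overlay push bytes 34, 65, 34 carry the three ASCII characters of '"A"'). A minimal decoder following that layout (a sketch; the real class also provides the encode helpers used above):

function decodeChannelMessageV0(buf) {
    return {
        type: buf[0],           // e.g. CONFIG_OVERLAY_MESSAGE
        channelNumber: buf[1],
        target: buf[2],         // TARGET_ANY in all the cases above
        payload: buf.subarray(3),
    };
}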

View File

@ -1,254 +0,0 @@
const assert = require('assert');
const { parseRedisConfig } = require('../../../lib/Config');
describe('parseRedisConfig', () => {
[
{
desc: 'with host and port',
input: {
host: 'localhost',
port: 6479,
},
},
{
desc: 'with host, port and password',
input: {
host: 'localhost',
port: 6479,
password: 'mypass',
},
},
{
desc: 'with host, port and an empty password',
input: {
host: 'localhost',
port: 6479,
password: '',
},
},
{
desc: 'with host, port and an empty retry config',
input: {
host: 'localhost',
port: 6479,
retry: {
},
},
},
{
desc: 'with host, port and a custom retry config',
input: {
host: 'localhost',
port: 6479,
retry: {
connectBackoff: {
min: 10,
max: 1000,
jitter: 0.1,
factor: 1.5,
deadline: 10000,
},
},
},
},
{
desc: 'with a single sentinel and no sentinel password',
input: {
name: 'myname',
sentinels: [
{
host: 'localhost',
port: 16479,
},
],
},
},
{
desc: 'with two sentinels and a sentinel password',
input: {
name: 'myname',
sentinels: [
{
host: '10.20.30.40',
port: 16479,
},
{
host: '10.20.30.41',
port: 16479,
},
],
sentinelPassword: 'mypass',
},
},
{
desc: 'with a sentinel and an empty sentinel password',
input: {
name: 'myname',
sentinels: [
{
host: '10.20.30.40',
port: 16479,
},
],
sentinelPassword: '',
},
},
{
desc: 'with a basic production-like config with sentinels',
input: {
name: 'scality-s3',
password: '',
sentinelPassword: '',
sentinels: [
{
host: 'storage-1',
port: 16379,
},
{
host: 'storage-2',
port: 16379,
},
{
host: 'storage-3',
port: 16379,
},
{
host: 'storage-4',
port: 16379,
},
{
host: 'storage-5',
port: 16379,
},
],
},
},
{
desc: 'with a single sentinel passed as a string',
input: {
name: 'myname',
sentinels: '10.20.30.40:16479',
},
output: {
name: 'myname',
sentinels: [
{
host: '10.20.30.40',
port: 16479,
},
],
},
},
{
desc: 'with a list of sentinels passed as a string',
input: {
name: 'myname',
sentinels: '10.20.30.40:16479,another-host:16480,10.20.30.42:16481',
sentinelPassword: 'mypass',
},
output: {
name: 'myname',
sentinels: [
{
host: '10.20.30.40',
port: 16479,
},
{
host: 'another-host',
port: 16480,
},
{
host: '10.20.30.42',
port: 16481,
},
],
sentinelPassword: 'mypass',
},
},
].forEach(testCase => {
it(`should parse a valid config ${testCase.desc}`, () => {
const redisConfig = parseRedisConfig(testCase.input);
assert.deepStrictEqual(redisConfig, testCase.output || testCase.input);
});
});
[
{
desc: 'that is empty',
input: {},
},
{
desc: 'with only a host',
input: {
host: 'localhost',
},
},
{
desc: 'with only a port',
input: {
port: 6479,
},
},
{
desc: 'with a custom retry config with missing values',
input: {
host: 'localhost',
port: 6479,
retry: {
connectBackoff: {
},
},
},
},
{
desc: 'with a sentinel but no name',
input: {
sentinels: [
{
host: 'localhost',
port: 16479,
},
],
},
},
{
desc: 'with a sentinel but an empty name',
input: {
name: '',
sentinels: [
{
host: 'localhost',
port: 16479,
},
],
},
},
{
desc: 'with an empty list of sentinels',
input: {
name: 'myname',
sentinels: [],
},
},
{
desc: 'with an empty list of sentinels passed as a string',
input: {
name: 'myname',
sentinels: '',
},
},
{
desc: 'with an invalid list of sentinels passed as a string (missing port)',
input: {
name: 'myname',
sentinels: '10.20.30.40:16479,10.20.30.50',
},
},
].forEach(testCase => {
it(`should fail to parse an invalid config ${testCase.desc}`, () => {
assert.throws(() => {
parseRedisConfig(testCase.input);
});
});
});
});
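
The string-valued sentinels cases pin down the accepted shorthand: a comma-separated list of host:port pairs, each of which must carry a port. A sketch of that normalization (hypothetical helper; the shipped parseRedisConfig wraps it in schema validation):

function parseSentinelsString(sentinels) {
    return sentinels.split(',').map(pair => {
        const [host, port] = pair.split(':');
        if (!host || !port || Number.isNaN(Number(port))) {
            throw new Error(`invalid sentinel "${pair}": expected host:port`);
        }
        return { host, port: Number(port) };
    });
}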

View File

@ -1,48 +0,0 @@
module.exports = {
entry: './index.js',
target: 'node',
devtool: 'source-map',
output: {
filename: 'zenko-vitastor.js'
},
module: {
rules: [
{
test: /\.jsx?$/,
use: {
loader: 'babel-loader',
options: {
presets: [ [ "@babel/preset-env", { "targets": { "node": "12.0" }, "exclude": [ "transform-regenerator" ] } ] ],
}
}
},
{
test: /\.json$/,
type: 'json'
}
]
},
externals: {
'leveldown': 'commonjs leveldown',
'bufferutil': 'commonjs bufferutil',
'diskusage': 'commonjs diskusage',
'utf-8-validate': 'commonjs utf-8-validate',
'fcntl': 'commonjs fcntl',
'ioctl': 'commonjs ioctl',
'vitastor': 'commonjs vitastor',
'vaultclient': 'commonjs vaultclient',
'bucketclient': 'commonjs bucketclient',
'scality-kms': 'commonjs scality-kms',
'sproxydclient': 'commonjs sproxydclient',
'hdclient': 'commonjs hdclient',
'cdmiclient': 'commonjs cdmiclient',
'kerberos': 'commonjs kerberos',
'@mongodb-js/zstd': 'commonjs @mongodb-js/zstd',
'@aws-sdk/credential-providers': 'commonjs @aws-sdk/credential-providers',
'snappy': 'commonjs snappy',
'mongodb-client-encryption': 'commonjs mongodb-client-encryption'
},
node: {
__dirname: false
}
};
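
Every entry under externals is a native or optional dependency: declaring it as a commonjs external makes webpack leave the require() call in place instead of trying to inline a compiled addon (vitastor, kerberos, snappy, ...) into zenko-vitastor.js, so those modules are resolved from node_modules at runtime. Assuming webpack and babel-loader are installed, the bundle would be built with something like npx webpack --mode production, which emits zenko-vitastor.js with a matching source map per the devtool setting.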

yarn.lock — 6491 lines (file diff suppressed because it is too large)