Compare commits

5 Commits: comparing `developmen...` to `bugfix/CLD...`

| Author | SHA1 | Date |
| --- | --- | --- |
| williamlardier | 591a9f669f | |
| williamlardier | e6e1a0efb1 | |
| williamlardier | c5b16bcdac | |
| williamlardier | 8071eae695 | |
| williamlardier | 8fb87c1b57 | |

```diff
@@ -20,16 +20,13 @@ jobs:
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v3
 
       - name: Render and test ${{ matrix.tests.name }}
-        uses: scality/action-prom-render-test@1.0.3
+        uses: scality/action-prom-render-test@1.0.1
         with:
           alert_file_path: monitoring/alerts.yaml
           test_file_path: ${{ matrix.tests.file }}
-          alert_inputs: |
-            namespace=zenko
-            service=artesca-data-connector-s3api-metrics
-            reportJob=artesca-data-ops-report-handler
-            replicas=3
+          alert_inputs: >-
+            namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
           github_token: ${{ secrets.GITHUB_TOKEN }}
```

```diff
@@ -67,8 +67,7 @@ env:
   ENABLE_LOCAL_CACHE: "true"
   REPORT_TOKEN: "report-token-1"
   REMOTE_MANAGEMENT_DISABLE: "1"
-  # https://github.com/git-lfs/git-lfs/issues/5749
-  GIT_CLONE_PROTECTION_ACTIVE: 'false'
 jobs:
   linting-coverage:
     runs-on: ubuntu-latest
```

```diff
@@ -150,9 +149,6 @@ jobs:
           provenance: false
           tags: |
             ghcr.io/${{ github.repository }}:${{ github.sha }}
-          labels: |
-            git.repository=${{ github.repository }}
-            git.commit-sha=${{ github.sha }}
           cache-from: type=gha,scope=cloudserver
           cache-to: type=gha,mode=max,scope=cloudserver
       - name: Build and push pykmip image
```

```diff
@@ -162,9 +158,6 @@ jobs:
           context: .github/pykmip
           tags: |
             ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
-          labels: |
-            git.repository=${{ github.repository }}
-            git.commit-sha=${{ github.sha }}
           cache-from: type=gha,scope=pykmip
           cache-to: type=gha,mode=max,scope=pykmip
       - name: Build and push MongoDB
```

README.md (175 lines changed)

```diff
@@ -1,7 +1,10 @@
-# Zenko CloudServer with Vitastor Backend
+# Zenko CloudServer
 
 ![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
 
+[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
+[![Docker Pulls][badgetwitter]](https://twitter.com/zenko)
+
 ## Overview
 
 CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
```

````diff
@@ -11,71 +14,137 @@ Scality’s Open Source Multi-Cloud Data Controller.
 CloudServer provides a single AWS S3 API interface to access multiple
 backend data storage both on-premise or public in the cloud.
 
-This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
-backend support.
+CloudServer is useful for Developers, either to run as part of a
+continous integration test environment to emulate the AWS S3 service locally
+or as an abstraction layer to develop object storage enabled
+application on the go.
 
-## Quick Start with Vitastor
+## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
 
-Vitastor Backend is in experimental status, however you can already try to
-run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
-it works too 😊.
+## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
 
-Installation instructions:
+## Docker
 
-### Install Vitastor
+[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)
 
-Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
+## Contributing
 
-### Install Zenko with Vitastor Backend
+In order to contribute, please follow the
+[Contributing Guidelines](
+https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
 
-- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
-- Install dependencies: `npm install --omit dev` or just `npm install`
-- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
-- Build Vitastor node.js binding by running `npm install` in `node-binding` subdirectory of Vitastor repository.
-  You need `node-gyp` and `vitastor-client-dev` (Vitastor client library) for it to succeed.
-- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
+## Installation
 
-### Install and Configure MongoDB
+### Dependencies
 
-Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
+Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x
+. Up-to-date versions can be found at
+[Nodesource](https://github.com/nodesource/distributions).
 
-### Setup Zenko
+### Clone source code
 
-- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
-- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
-- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
-- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
-- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
-  and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
-  access keys, but it's not published, so let's use a file for now.
-- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
-  You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
-  in this file.
-
-Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc)
-instead of "locations" (zones like us-east-1) as it was in original Zenko CloudServer.
-
-### Start Zenko
-
-Start the S3 server with: `node index.js`
-
-If you use default settings, Zenko CloudServer starts on port 8000.
-The default access key is `accessKey1` with a secret key of `verySecretKey1`.
-
-Now you can access your S3 with `s3cmd` or `geesefs`:
-
-```
-s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
-```
-
-```
-AWS_ACCESS_KEY_ID=accessKey1 \
-AWS_SECRET_ACCESS_KEY=verySecretKey1 \
-geesefs --endpoint http://localhost:8000 testbucket mountdir
-```
-
-# Author & License
-
-- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
-- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
-  (a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
+```shell
+git clone https://github.com/scality/S3.git
+```
+
+### Install js dependencies
+
+Go to the ./S3 folder,
+
+```shell
+yarn install --frozen-lockfile
+```
+
+If you get an error regarding installation of the diskUsage module,
+please install g++.
+
+If you get an error regarding level-down bindings, try clearing your yarn cache:
+
+```shell
+yarn cache clean
+```
+
+## Run it with a file backend
+
+```shell
+yarn start
+```
+
+This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
+9991 are also open locally for internal transfer of metadata and data,
+respectively.
+
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+
+By default the metadata files will be saved in the
+localMetadata directory and the data files will be saved
+in the localData directory within the ./S3 directory on your
+machine. These directories have been pre-created within the
+repository. If you would like to save the data or metadata in
+different locations of your choice, you must specify them with absolute paths.
+So, when starting the server:
+
+```shell
+mkdir -m 700 $(pwd)/myFavoriteDataPath
+mkdir -m 700 $(pwd)/myFavoriteMetadataPath
+export S3DATAPATH="$(pwd)/myFavoriteDataPath"
+export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
+yarn start
+```
+
+## Run it with multiple data backends
+
+```shell
+export S3DATA='multiple'
+yarn start
+```
+
+This starts a Zenko CloudServer on port 8000.
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+
+With multiple backends, you have the ability to
+choose where each object will be saved by setting
+the following header with a locationConstraint on
+a PUT request:
+
+```shell
+'x-amz-meta-scal-location-constraint':'myLocationConstraint'
+```
+
+If no header is sent with a PUT object request, the
+location constraint of the bucket will determine
+where the data is saved. If the bucket has no location
+constraint, the endpoint of the PUT request will be
+used to determine location.
+
+See the Configuration section in our documentation
+[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
+to learn how to set location constraints.
+
+## Run it with an in-memory backend
+
+```shell
+yarn run mem_backend
+```
+
+This starts a Zenko CloudServer on port 8000.
+The default access key is accessKey1 with
+a secret key of verySecretKey1.
+
+## Run it with Vault user management
+
+Note: Vault is proprietary and must be accessed separately.
+
+```shell
+export S3VAULT=vault
+yarn start
+```
+
+This starts a Zenko CloudServer using Vault for user management.
+
+[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
+[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
+[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
+[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae
````

```diff
@@ -0,0 +1,46 @@
+#!/usr/bin/env node
+'use strict'; // eslint-disable-line strict
+
+const {
+    startWSManagementClient,
+    startPushConnectionHealthCheckServer,
+} = require('../lib/management/push');
+
+const logger = require('../lib/utilities/logger');
+
+const {
+    PUSH_ENDPOINT: pushEndpoint,
+    INSTANCE_ID: instanceId,
+    MANAGEMENT_TOKEN: managementToken,
+} = process.env;
+
+if (!pushEndpoint) {
+    logger.error('missing push endpoint env var');
+    process.exit(1);
+}
+
+if (!instanceId) {
+    logger.error('missing instance id env var');
+    process.exit(1);
+}
+
+if (!managementToken) {
+    logger.error('missing management token env var');
+    process.exit(1);
+}
+
+startPushConnectionHealthCheckServer(err => {
+    if (err) {
+        logger.error('could not start healthcheck server', { error: err });
+        process.exit(1);
+    }
+    const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
+    startWSManagementClient(url, managementToken, err => {
+        if (err) {
+            logger.error('connection failed, exiting', { error: err });
+            process.exit(1);
+        }
+        logger.info('no more connection, exiting');
+        process.exit(0);
+    });
+});
```

```diff
@@ -0,0 +1,46 @@
+#!/usr/bin/env node
+'use strict'; // eslint-disable-line strict
+
+const {
+    startWSManagementClient,
+    startPushConnectionHealthCheckServer,
+} = require('../lib/management/push');
+
+const logger = require('../lib/utilities/logger');
+
+const {
+    PUSH_ENDPOINT: pushEndpoint,
+    INSTANCE_ID: instanceId,
+    MANAGEMENT_TOKEN: managementToken,
+} = process.env;
+
+if (!pushEndpoint) {
+    logger.error('missing push endpoint env var');
+    process.exit(1);
+}
+
+if (!instanceId) {
+    logger.error('missing instance id env var');
+    process.exit(1);
+}
+
+if (!managementToken) {
+    logger.error('missing management token env var');
+    process.exit(1);
+}
+
+startPushConnectionHealthCheckServer(err => {
+    if (err) {
+        logger.error('could not start healthcheck server', { error: err });
+        process.exit(1);
+    }
+    const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
+    startWSManagementClient(url, managementToken, err => {
+        if (err) {
+            logger.error('connection failed, exiting', { error: err });
+            process.exit(1);
+        }
+        logger.info('no more connection, exiting');
+        process.exit(0);
+    });
+});
```

```diff
@@ -4,7 +4,6 @@
     "metricsPort": 8002,
     "metricsListenOn": [],
     "replicationGroupId": "RG001",
-    "workers": 4,
     "restEndpoints": {
         "localhost": "us-east-1",
         "127.0.0.1": "us-east-1",
```

```diff
@@ -102,14 +101,6 @@
         "readPreference": "primary",
         "database": "metadata"
     },
-    "authdata": "authdata.json",
-    "backends": {
-        "auth": "file",
-        "data": "file",
-        "metadata": "mongodb",
-        "kms": "file",
-        "quota": "none"
-    },
     "externalBackends": {
         "aws_s3": {
             "httpAgent": {
```

```diff
@@ -1,71 +0,0 @@
-{
-    "port": 8000,
-    "listenOn": [],
-    "metricsPort": 8002,
-    "metricsListenOn": [],
-    "replicationGroupId": "RG001",
-    "restEndpoints": {
-        "localhost": "STANDARD",
-        "127.0.0.1": "STANDARD",
-        "yourhostname.ru": "STANDARD"
-    },
-    "websiteEndpoints": [
-        "static.yourhostname.ru"
-    ],
-    "replicationEndpoints": [ {
-        "site": "zenko",
-        "servers": ["127.0.0.1:8000"],
-        "default": true
-    } ],
-    "log": {
-        "logLevel": "info",
-        "dumpLevel": "error"
-    },
-    "healthChecks": {
-        "allowFrom": ["127.0.0.1/8", "::1"]
-    },
-    "backends": {
-        "metadata": "mongodb"
-    },
-    "mongodb": {
-        "replicaSetHosts": "127.0.0.1:27017",
-        "writeConcern": "majority",
-        "replicaSet": "rs0",
-        "readPreference": "primary",
-        "database": "s3",
-        "authCredentials": {
-            "username": "s3",
-            "password": ""
-        }
-    },
-    "externalBackends": {
-        "aws_s3": {
-            "httpAgent": {
-                "keepAlive": false,
-                "keepAliveMsecs": 1000,
-                "maxFreeSockets": 256,
-                "maxSockets": null
-            }
-        },
-        "gcp": {
-            "httpAgent": {
-                "keepAlive": true,
-                "keepAliveMsecs": 1000,
-                "maxFreeSockets": 256,
-                "maxSockets": null
-            }
-        }
-    },
-    "requests": {
-        "viaProxy": false,
-        "trustedProxyCIDRs": [],
-        "extractClientIPFromHeader": ""
-    },
-    "bucketNotificationDestinations": [
-        {
-            "resource": "target1",
-            "type": "dummy",
-            "host": "localhost:6000"
-        }
-    ]
-}
```

```diff
@@ -116,7 +116,7 @@ const constants = {
     ],
 
     // user metadata header to set object locationConstraint
-    objectLocationConstraintHeader: 'x-amz-storage-class',
+    objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
     lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
    legacyLocations: ['sproxyd', 'legacy'],
     // declare here all existing service accounts and their properties
```

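The restored value follows the S3 user-metadata convention, so a client selects a location constraint per object through an `x-amz-meta-...` header. As a hedged illustration (not part of this diff): with the AWS SDK for JavaScript v2, every key in the `Metadata` map is sent with the `x-amz-meta-` prefix, so the header above can be produced like this; the endpoint, credentials, bucket, and constraint name are placeholders.

```js
// Sketch: setting the restored objectLocationConstraintHeader
// ('x-amz-meta-scal-location-constraint') from a client, assuming
// the AWS SDK for JavaScript v2. All identifiers below are placeholders.
const AWS = require('aws-sdk');

const s3 = new AWS.S3({
    endpoint: 'http://localhost:8000',
    accessKeyId: 'accessKey1',
    secretAccessKey: 'verySecretKey1',
    s3ForcePathStyle: true,
});

s3.putObject({
    Bucket: 'testbucket',
    Key: 'example-object',
    Body: 'hello',
    // The SDK sends this as the 'x-amz-meta-scal-location-constraint' header.
    Metadata: { 'scal-location-constraint': 'myLocationConstraint' },
}, (err, data) => {
    if (err) { console.error(err); } else { console.log(data.ETag); }
});
```
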
```diff
@@ -205,6 +205,9 @@ const constants = {
     ],
     allowedUtapiEventFilterStates: ['allow', 'deny'],
     allowedRestoreObjectRequestTierValues: ['Standard'],
+    validStorageClasses: [
+        'STANDARD',
+    ],
     lifecycleListing: {
         CURRENT_TYPE: 'current',
         NON_CURRENT_TYPE: 'noncurrent',
```

index.js (12 lines changed)

```diff
@@ -1,10 +1,10 @@
 'use strict'; // eslint-disable-line strict
 
-require('werelogs').stderrUtils.catchAndTimestampStderr(
-    undefined,
-    // Do not exit as workers have their own listener that will exit
-    // But primary don't have another listener
-    require('cluster').isPrimary ? 1 : null,
-);
+/**
+ * Catch uncaught exceptions and add timestamp to aid debugging
+ */
+process.on('uncaughtException', err => {
+    process.stderr.write(`${new Date().toISOString()}: Uncaught exception: \n${err.stack}`);
+});
 
 require('./lib/server.js')();
```

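Both variants install a process-wide uncaught-exception hook. The removed (`-`) side delegates to werelogs' `catchAndTimestampStderr`, whose second argument, per the inline comments, is an exit code used only in the cluster primary (workers exit through their own listener). A rough hand-rolled equivalent of that behavior, under those stated assumptions, would look like:

```js
// A hedged sketch approximating the werelogs-based variant above, assuming
// catchAndTimestampStderr(prefix, exitCode) logs the stack with a timestamp
// and exits with the given code when it is not null (as the inline comments
// in the diff describe). Requires Node 16+ for cluster.isPrimary.
const cluster = require('cluster');

process.on('uncaughtException', err => {
    process.stderr.write(`${new Date().toISOString()}: Uncaught exception: \n${err.stack}\n`);
    // Workers register their own exiting listener; only the primary exits here.
    if (cluster.isPrimary) {
        process.exit(1);
    }
});
```
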
lib/Config.js (245 lines changed)

```diff
@@ -107,47 +107,6 @@ function parseSproxydConfig(configSproxyd) {
     return joi.attempt(configSproxyd, joiSchema, 'bad config');
 }
 
-function parseRedisConfig(redisConfig) {
-    const joiSchema = joi.object({
-        password: joi.string().allow(''),
-        host: joi.string(),
-        port: joi.number(),
-        retry: joi.object({
-            connectBackoff: joi.object({
-                min: joi.number().required(),
-                max: joi.number().required(),
-                jitter: joi.number().required(),
-                factor: joi.number().required(),
-                deadline: joi.number().required(),
-            }),
-        }),
-        // sentinel config
-        sentinels: joi.alternatives().try(
-            joi.string()
-                .pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
-                .custom(hosts => hosts.split(',').map(item => {
-                    const [host, port] = item.split(':');
-                    return { host, port: Number.parseInt(port, 10) };
-                })),
-            joi.array().items(
-                joi.object({
-                    host: joi.string().required(),
-                    port: joi.number().required(),
-                })
-            ).min(1),
-        ),
-        name: joi.string(),
-        sentinelPassword: joi.string().allow(''),
-    })
-        .and('host', 'port')
-        .and('sentinels', 'name')
-        .xor('host', 'sentinels')
-        .without('sentinels', ['host', 'port'])
-        .without('host', ['sentinels', 'sentinelPassword']);
-
-    return joi.attempt(redisConfig, joiSchema, 'bad config');
-}
-
 function restEndpointsAssert(restEndpoints, locationConstraints) {
     assert(typeof restEndpoints === 'object',
         'bad config: restEndpoints must be an object of endpoints');
```

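The removed `parseRedisConfig` centralized Redis validation in one joi schema; its `sentinels` field notably accepted either a `host:port,host:port` string (normalized into objects by the `custom` converter) or an array of `{ host, port }` objects. A minimal standalone sketch of just that alternative, assuming the `joi` package this file already depends on (v16+ API):

```js
// Minimal sketch of the removed schema's sentinels alternative,
// assuming the joi package already used by lib/Config.js.
const joi = require('joi');

const sentinelsSchema = joi.alternatives().try(
    joi.string()
        .pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
        .custom(hosts => hosts.split(',').map(item => {
            const [host, port] = item.split(':');
            return { host, port: Number.parseInt(port, 10) };
        })),
    joi.array().items(
        joi.object({
            host: joi.string().required(),
            port: joi.number().required(),
        })
    ).min(1),
);

// Both forms validate; the string form is normalized to objects:
console.log(joi.attempt('10.0.0.1:16379,10.0.0.2:16379', sentinelsSchema));
// => [ { host: '10.0.0.1', port: 16379 }, { host: '10.0.0.2', port: 16379 } ]
console.log(joi.attempt([{ host: '10.0.0.1', port: 16379 }], sentinelsSchema));
```
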
```diff
@@ -377,7 +336,7 @@ function dmfLocationConstraintAssert(locationObj) {
 function locationConstraintAssert(locationConstraints) {
     const supportedBackends =
         ['mem', 'file', 'scality',
-            'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends));
+            'mongodb', 'dmf', 'azure_archive'].concat(Object.keys(validExternalBackends));
     assert(typeof locationConstraints === 'object',
         'bad config: locationConstraints must be an object');
     Object.keys(locationConstraints).forEach(l => {
```

```diff
@@ -502,23 +461,27 @@ function locationConstraintAssert(locationConstraints) {
                 locationConstraints[l].details.connector.hdclient);
         }
     });
+    assert(Object.keys(locationConstraints)
+        .includes('us-east-1'), 'bad locationConfig: must ' +
+        'include us-east-1 as a locationConstraint');
 }
 
 function parseUtapiReindex(config) {
     const {
         enabled,
         schedule,
-        redis,
+        sentinel,
         bucketd,
         onlyCountLatestWhenObjectLocked,
     } = config;
     assert(typeof enabled === 'boolean',
         'bad config: utapi.reindex.enabled must be a boolean');
-
-    const parsedRedis = parseRedisConfig(redis);
-    assert(Array.isArray(parsedRedis.sentinels),
-        'bad config: utapi reindex redis config requires a list of sentinels');
+    assert(typeof sentinel === 'object',
+        'bad config: utapi.reindex.sentinel must be an object');
+    assert(typeof sentinel.port === 'number',
+        'bad config: utapi.reindex.sentinel.port must be a number');
+    assert(typeof sentinel.name === 'string',
+        'bad config: utapi.reindex.sentinel.name must be a string');
     assert(typeof bucketd === 'object',
         'bad config: utapi.reindex.bucketd must be an object');
     assert(typeof bucketd.port === 'number',
```

```diff
@@ -536,13 +499,6 @@ function parseUtapiReindex(config) {
             'bad config: utapi.reindex.schedule must be a valid ' +
             `cron schedule. ${e.message}.`);
     }
-    return {
-        enabled,
-        schedule,
-        redis: parsedRedis,
-        bucketd,
-        onlyCountLatestWhenObjectLocked,
-    };
 }
 
 function requestsConfigAssert(requestsConfig) {
```

```diff
@@ -630,6 +586,7 @@ class Config extends EventEmitter {
         // Read config automatically
         this._getLocationConfig();
         this._getConfig();
+        this._configureBackends();
     }
 
     _getLocationConfig() {
```

```diff
@@ -841,11 +798,11 @@ class Config extends EventEmitter {
             this.websiteEndpoints = config.websiteEndpoints;
         }
 
-        this.workers = false;
-        if (config.workers !== undefined) {
-            assert(Number.isInteger(config.workers) && config.workers > 0,
-                'bad config: workers must be a positive integer');
-            this.workers = config.workers;
+        this.clusters = false;
+        if (config.clusters !== undefined) {
+            assert(Number.isInteger(config.clusters) && config.clusters > 0,
+                'bad config: clusters must be a positive integer');
+            this.clusters = config.clusters;
         }
 
         if (config.usEastBehavior !== undefined) {
```

```diff
@@ -1083,7 +1040,8 @@ class Config extends EventEmitter {
             assert(typeof config.localCache.port === 'number',
                 'config: bad port for localCache. port must be a number');
             if (config.localCache.password !== undefined) {
-                assert(typeof config.localCache.password === 'string',
+                assert(
+                    this._verifyRedisPassword(config.localCache.password),
                     'config: vad password for localCache. password must' +
                     ' be a string');
             }
```

```diff
@@ -1109,7 +1067,55 @@ class Config extends EventEmitter {
         }
 
         if (config.redis) {
-            this.redis = parseRedisConfig(config.redis);
+            if (config.redis.sentinels) {
+                this.redis = { sentinels: [], name: null };
+
+                assert(typeof config.redis.name === 'string',
+                    'bad config: redis sentinel name must be a string');
+                this.redis.name = config.redis.name;
+                assert(Array.isArray(config.redis.sentinels) ||
+                    typeof config.redis.sentinels === 'string',
+                    'bad config: redis sentinels must be an array or string');
+
+                if (typeof config.redis.sentinels === 'string') {
+                    config.redis.sentinels.split(',').forEach(item => {
+                        const [host, port] = item.split(':');
+                        this.redis.sentinels.push({ host,
+                            port: Number.parseInt(port, 10) });
+                    });
+                } else if (Array.isArray(config.redis.sentinels)) {
+                    config.redis.sentinels.forEach(item => {
+                        const { host, port } = item;
+                        assert(typeof host === 'string',
+                            'bad config: redis sentinel host must be a string');
+                        assert(typeof port === 'number',
+                            'bad config: redis sentinel port must be a number');
+                        this.redis.sentinels.push({ host, port });
+                    });
+                }
+
+                if (config.redis.sentinelPassword !== undefined) {
+                    assert(
+                        this._verifyRedisPassword(config.redis.sentinelPassword));
+                    this.redis.sentinelPassword = config.redis.sentinelPassword;
+                }
+            } else {
+                // check for standalone configuration
+                this.redis = {};
+                assert(typeof config.redis.host === 'string',
+                    'bad config: redis.host must be a string');
+                assert(typeof config.redis.port === 'number',
+                    'bad config: redis.port must be a number');
+                this.redis.host = config.redis.host;
+                this.redis.port = config.redis.port;
+            }
+            if (config.redis.password !== undefined) {
+                assert(
+                    this._verifyRedisPassword(config.redis.password),
+                    'bad config: invalid password for redis. password must ' +
+                    'be a string');
+                this.redis.password = config.redis.password;
+            }
         }
         if (config.scuba) {
             this.scuba = {};
```

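The added branch re-inlines what the removed `parseRedisConfig` used to do: `config.redis` is accepted either in sentinel form (a `name` plus `sentinels` given as a comma-separated string or an object array) or in standalone form (`host` plus `port`). A standalone sketch of the two shapes and the miniature checks they pass; all values below are hypothetical:

```js
// Hypothetical examples of the two config.redis shapes the inline
// validation above accepts: sentinel mode, or standalone host/port.
const assert = require('assert');

const sentinelForm = {
    name: 'mymaster',
    // May be 'host:port,host:port' or an array of { host, port } objects.
    sentinels: '10.0.0.1:26379,10.0.0.2:26379',
};
const standaloneForm = { host: '127.0.0.1', port: 6379 };

// The same checks the diff performs, in miniature:
assert(typeof sentinelForm.name === 'string');
assert(Array.isArray(sentinelForm.sentinels) ||
    typeof sentinelForm.sentinels === 'string');
assert(typeof standaloneForm.host === 'string');
assert(typeof standaloneForm.port === 'number');
console.log('both forms pass the inline validation sketch');
```
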
```diff
@@ -1177,8 +1183,50 @@ class Config extends EventEmitter {
             assert(config.redis, 'missing required property of utapi ' +
                 'configuration: redis');
             if (config.utapi.redis) {
-                this.utapi.redis = parseRedisConfig(config.utapi.redis);
-                if (this.utapi.redis.retry === undefined) {
+                if (config.utapi.redis.sentinels) {
+                    this.utapi.redis = { sentinels: [], name: null };
+
+                    assert(typeof config.utapi.redis.name === 'string',
+                        'bad config: redis sentinel name must be a string');
+                    this.utapi.redis.name = config.utapi.redis.name;
+
+                    assert(Array.isArray(config.utapi.redis.sentinels),
+                        'bad config: redis sentinels must be an array');
+                    config.utapi.redis.sentinels.forEach(item => {
+                        const { host, port } = item;
+                        assert(typeof host === 'string',
+                            'bad config: redis sentinel host must be a string');
+                        assert(typeof port === 'number',
+                            'bad config: redis sentinel port must be a number');
+                        this.utapi.redis.sentinels.push({ host, port });
+                    });
+                } else {
+                    // check for standalone configuration
+                    this.utapi.redis = {};
+                    assert(typeof config.utapi.redis.host === 'string',
+                        'bad config: redis.host must be a string');
+                    assert(typeof config.utapi.redis.port === 'number',
+                        'bad config: redis.port must be a number');
+                    this.utapi.redis.host = config.utapi.redis.host;
+                    this.utapi.redis.port = config.utapi.redis.port;
+                }
+                if (config.utapi.redis.retry !== undefined) {
+                    if (config.utapi.redis.retry.connectBackoff !== undefined) {
+                        const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
+                        assert.strictEqual(typeof min, 'number',
+                            'utapi.redis.retry.connectBackoff: min must be a number');
+                        assert.strictEqual(typeof max, 'number',
+                            'utapi.redis.retry.connectBackoff: max must be a number');
+                        assert.strictEqual(typeof jitter, 'number',
+                            'utapi.redis.retry.connectBackoff: jitter must be a number');
+                        assert.strictEqual(typeof factor, 'number',
+                            'utapi.redis.retry.connectBackoff: factor must be a number');
+                        assert.strictEqual(typeof deadline, 'number',
+                            'utapi.redis.retry.connectBackoff: deadline must be a number');
+                    }
+
+                    this.utapi.redis.retry = config.utapi.redis.retry;
+                } else {
                     this.utapi.redis.retry = {
                         connectBackoff: {
                             min: 10,
```

```diff
@@ -1189,6 +1237,22 @@ class Config extends EventEmitter {
                         },
                     };
                 }
+                if (config.utapi.redis.password !== undefined) {
+                    assert(
+                        this._verifyRedisPassword(config.utapi.redis.password),
+                        'config: invalid password for utapi redis. password' +
+                        ' must be a string');
+                    this.utapi.redis.password = config.utapi.redis.password;
+                }
+                if (config.utapi.redis.sentinelPassword !== undefined) {
+                    assert(
+                        this._verifyRedisPassword(
+                            config.utapi.redis.sentinelPassword),
+                        'config: invalid password for utapi redis. password' +
+                        ' must be a string');
+                    this.utapi.redis.sentinelPassword =
+                        config.utapi.redis.sentinelPassword;
+                }
             }
             if (config.utapi.metrics) {
                 this.utapi.metrics = config.utapi.metrics;
```

```diff
@@ -1258,7 +1322,8 @@ class Config extends EventEmitter {
         }
 
         if (config.utapi && config.utapi.reindex) {
-            this.utapi.reindex = parseUtapiReindex(config.utapi.reindex);
+            parseUtapiReindex(config.utapi.reindex);
+            this.utapi.reindex = config.utapi.reindex;
         }
     }
 
```

```diff
@@ -1303,8 +1368,6 @@ class Config extends EventEmitter {
             }
         }
 
-        this.authdata = config.authdata || 'authdata.json';
-
         this.kms = {};
         if (config.kms) {
             assert(typeof config.kms.userName === 'string');
```

```diff
@@ -1524,6 +1587,25 @@ class Config extends EventEmitter {
             this.outboundProxy.certs = certObj.certs;
         }
 
+        this.managementAgent = {};
+        this.managementAgent.port = 8010;
+        this.managementAgent.host = 'localhost';
+        if (config.managementAgent !== undefined) {
+            if (config.managementAgent.port !== undefined) {
+                assert(Number.isInteger(config.managementAgent.port)
+                    && config.managementAgent.port > 0,
+                    'bad config: managementAgent port must be a positive ' +
+                    'integer');
+                this.managementAgent.port = config.managementAgent.port;
+            }
+            if (config.managementAgent.host !== undefined) {
+                assert.strictEqual(typeof config.managementAgent.host, 'string',
+                    'bad config: management agent host must ' +
+                    'be a string');
+                this.managementAgent.host = config.managementAgent.host;
+            }
+        }
+
         // Ephemeral token to protect the reporting endpoint:
         // try inherited from parent first, then hardcoded in conf file,
         // then create a fresh one as last resort.
```

```diff
@@ -1613,8 +1695,6 @@ class Config extends EventEmitter {
                 'bad config: maxScannedLifecycleListingEntries must be greater than 2');
             this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
         }
-
-        this._configureBackends(config);
     }
 
     _setTimeOptions() {
```

_setTimeOptions() {
|
_setTimeOptions() {
|
||||||
|
@ -1653,37 +1733,35 @@ class Config extends EventEmitter {
|
||||||
}
|
}
|
||||||
|
|
||||||
_getAuthData() {
|
_getAuthData() {
|
||||||
return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' }));
|
return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
|
||||||
}
|
}
|
||||||
|
|
||||||
_configureBackends(config) {
|
_configureBackends() {
|
||||||
const backends = config.backends || {};
|
|
||||||
/**
|
/**
|
||||||
* Configure the backends for Authentication, Data and Metadata.
|
* Configure the backends for Authentication, Data and Metadata.
|
||||||
*/
|
*/
|
||||||
let auth = backends.auth || 'mem';
|
let auth = 'mem';
|
||||||
let data = backends.data || 'multiple';
|
let data = 'multiple';
|
||||||
let metadata = backends.metadata || 'file';
|
let metadata = 'file';
|
||||||
let kms = backends.kms || 'file';
|
let kms = 'file';
|
||||||
let quota = backends.quota || 'none';
|
let quota = 'none';
|
||||||
if (process.env.S3BACKEND) {
|
if (process.env.S3BACKEND) {
|
||||||
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
|
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
|
||||||
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
|
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
|
||||||
'bad environment variable: S3BACKEND environment variable ' +
|
'bad environment variable: S3BACKEND environment variable ' +
|
||||||
'should be one of mem/file/scality/cdmi'
|
'should be one of mem/file/scality/cdmi'
|
||||||
);
|
);
|
||||||
auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem';
|
auth = process.env.S3BACKEND;
|
||||||
data = process.env.S3BACKEND;
|
data = process.env.S3BACKEND;
|
||||||
metadata = process.env.S3BACKEND;
|
metadata = process.env.S3BACKEND;
|
||||||
kms = process.env.S3BACKEND;
|
kms = process.env.S3BACKEND;
|
||||||
}
|
}
|
||||||
if (process.env.S3VAULT) {
|
if (process.env.S3VAULT) {
|
||||||
auth = process.env.S3VAULT;
|
auth = process.env.S3VAULT;
|
||||||
auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
|
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
|
||||||
// Auth only checks for 'mem' since mem === file
|
// Auth only checks for 'mem' since mem === file
|
||||||
|
auth = 'mem';
|
||||||
let authData;
|
let authData;
|
||||||
if (process.env.SCALITY_ACCESS_KEY_ID &&
|
if (process.env.SCALITY_ACCESS_KEY_ID &&
|
||||||
process.env.SCALITY_SECRET_ACCESS_KEY) {
|
process.env.SCALITY_SECRET_ACCESS_KEY) {
|
||||||
|
```diff
@@ -1712,10 +1790,10 @@ class Config extends EventEmitter {
                 'should be one of mem/file/scality/multiple'
             );
             data = process.env.S3DATA;
+        }
         if (data === 'scality' || data === 'multiple') {
             data = 'multiple';
         }
-        }
         assert(this.locationConstraints !== undefined &&
             this.restEndpoints !== undefined,
             'bad config: locationConstraints and restEndpoints must be set'
```

```diff
@@ -1739,6 +1817,10 @@ class Config extends EventEmitter {
         };
     }
 
+    _verifyRedisPassword(password) {
+        return typeof password === 'string';
+    }
+
     setAuthDataAccounts(accounts) {
         this.authData.accounts = accounts;
         this.emit('authdata-update');
```

```diff
@@ -1873,7 +1955,6 @@ class Config extends EventEmitter {
 
 module.exports = {
     parseSproxydConfig,
-    parseRedisConfig,
     locationConstraintAssert,
     ConfigObject: Config,
     config: new Config(),
```

```diff
@@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
             apiMethod, 's3');
     }
 
-    if (apiMethod === 'bucketPut') {
+    if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
         return null;
     }
 
```

```diff
@@ -65,17 +65,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
 
     const requestContexts = [];
 
-    if (apiMethod === 'multiObjectDelete') {
-        // MultiObjectDelete does not require any authorization when evaluating
-        // the API. Instead, we authorize each object passed.
-        // But in order to get any relevant information from the authorization service
-        // for example, the account quota, we must send a request context object
-        // with no `specificResource`. We expect the result to be an implicit deny.
-        // In the API, we then ignore these authorization results, and we can use
-        // any information returned, e.g., the quota.
-        const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
-        requestContexts.push(requestContextMultiObjectDelete);
-    } else if (apiMethodAfterVersionCheck === 'objectCopy'
+    if (apiMethodAfterVersionCheck === 'objectCopy'
         || apiMethodAfterVersionCheck === 'objectPutCopyPart') {
         const objectGetAction = sourceVersionId ? 'objectGetVersion' :
             'objectGet';
```

```diff
@@ -81,6 +81,8 @@ function isMetricStale(metric, resourceType, resourceName, action, inflight, log
  * @param {number} inflight - The number of inflight requests.
  * @param {number} inflightForCheck - The number of inflight requests for checking quotas.
  * @param {string} action - The action being performed.
+ * @param {boolean} isStorageReserved - Flag to check if the current quota, minus
+ * the incoming bytes, are under the limit.
  * @param {object} log - The logger object.
  * @param {function} callback - The callback function to be called when evaluation is complete.
  * @returns {object} - The result of the evaluation.
```

```diff
@@ -93,25 +95,28 @@ function _evaluateQuotas(
     inflight,
     inflightForCheck,
     action,
+    isStorageReserved,
     log,
     callback,
 ) {
     let bucketQuotaExceeded = false;
     let accountQuotaExceeded = false;
     const creationDate = new Date(bucket.getCreationDate()).getTime();
+    const spaceReservedMultiplier = isStorageReserved ? -1 : 1;
     return async.parallel({
         bucketQuota: parallelDone => {
             if (bucketQuota > 0) {
                 return QuotaService.getUtilizationMetrics('bucket',
                     `${bucket.getName()}_${creationDate}`, null, {
                         action,
-                        inflight,
+                        inflight: isStorageReserved ? 0 : inflight,
                     }, (err, bucketMetrics) => {
                         if (err || inflight < 0) {
                             return parallelDone(err);
                         }
                         if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
-                            bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
+                            bucketMetrics.bytesTotal +
+                            (inflightForCheck * spaceReservedMultiplier) > bucketQuota) {
                             log.debug('Bucket quota exceeded', {
                                 bucket: bucket.getName(),
                                 action,
```

```diff
@@ -131,13 +136,14 @@ function _evaluateQuotas(
             return QuotaService.getUtilizationMetrics('account',
                 account.account, null, {
                     action,
-                    inflight,
+                    inflight: isStorageReserved ? 0 : inflight,
                 }, (err, accountMetrics) => {
                     if (err || inflight < 0) {
                         return parallelDone(err);
                     }
                     if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
-                        accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
+                        accountMetrics.bytesTotal +
+                        (inflightForCheck * spaceReservedMultiplier) > accountQuota) {
                         log.debug('Account quota exceeded', {
                             accountId: account.account,
                             action,
```

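The `spaceReservedMultiplier` change means that when `isStorageReserved` is set, the utilization metrics are queried with zero inflight bytes and `inflightForCheck` is subtracted from, rather than added to, the measured usage before comparing against the quota, matching the jsdoc above ("the current quota, minus the incoming bytes, are under the limit"). A worked arithmetic sketch with hypothetical numbers:

```js
// Worked example of the quota comparison above, with hypothetical numbers.
function quotaExceeded(bytesTotal, inflightForCheck, quota, isStorageReserved) {
    const spaceReservedMultiplier = isStorageReserved ? -1 : 1;
    return bytesTotal + (inflightForCheck * spaceReservedMultiplier) > quota;
}

const quota = 1000;
// Normal write: 900 bytes used plus 200 incoming exceeds the 1000-byte quota.
console.log(quotaExceeded(900, 200, quota, false)); // true
// Reserved storage: per the isStorageReserved jsdoc, the incoming 200 bytes
// are subtracted before the comparison, so the check passes.
console.log(quotaExceeded(900, 200, quota, true)); // false
```
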
```diff
@@ -189,14 +195,14 @@ function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
  * @param {array} apiNames - action names: operations to authorize
  * @param {string} apiMethod - the main API call
  * @param {number} inflight - inflight bytes
- * @param {boolean} isStorageReserved - Flag to check if the current quota, minus
+ * @param {boolean} isStorageReserved - flag to check if the current quota, minus
  * the incoming bytes, are under the limit.
  * @param {Logger} log - logger
  * @param {function} callback - callback function
  * @returns {boolean} - true if the quota is valid, false otherwise
  */
 function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
-    if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
+    if (!config.isQuotaEnabled() || !inflight) {
         return callback(null);
     }
     let type;
```

```diff
@@ -230,11 +236,6 @@ function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight,
         return callback(null);
     }
 
-    if (isStorageReserved) {
-        // eslint-disable-next-line no-param-reassign
-        inflight = 0;
-    }
-
     return async.forEach(apiNames, (apiName, done) => {
         // Object copy operations first check the target object,
         // meaning the source object, containing the current bytes,
```

```diff
@@ -255,7 +256,7 @@ function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight,
         let _inflights = shouldSendInflights ? inflight : undefined;
         const inflightForCheck = shouldSendInflights ? 0 : inflight;
         return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
-            inflightForCheck, apiName, log,
+            inflightForCheck, apiName, isStorageReserved, log,
             (err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
                 if (err) {
                     return done(err);
```

```diff
@@ -278,7 +279,7 @@ function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight,
         cb => {
             if (errorFromAPI) {
                 return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
-                    null, apiName, log, cb);
+                    null, apiName, false, log, cb);
             }
             return cb();
         },
```

```diff
@@ -45,8 +45,9 @@ function checkLocationConstraint(request, locationConstraint, log) {
     } else if (parsedHost && restEndpoints[parsedHost]) {
         locationConstraintChecked = restEndpoints[parsedHost];
     } else {
-        locationConstraintChecked = Object.keys(locationConstrains)[0];
-        log.trace('no location constraint provided on bucket put; setting '+locationConstraintChecked);
+        log.trace('no location constraint provided on bucket put;' +
+            'setting us-east-1');
+        locationConstraintChecked = 'us-east-1';
     }
 
     if (!locationConstraints[locationConstraintChecked]) {
```

```diff
@@ -6,7 +6,6 @@ const convertToXml = s3middleware.convertToXml;
 const { pushMetric } = require('../utapi/utilities');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
 const { hasNonPrintables } = require('../utilities/stringChecks');
-const { config } = require('../Config');
 const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
 const constants = require('../../constants');
 const services = require('../services');
```

```diff
@@ -66,7 +65,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
     const websiteRedirectHeader =
         request.headers['x-amz-website-redirect-location'];
     if (request.headers['x-amz-storage-class'] &&
-        !config.locationConstraints[request.headers['x-amz-storage-class']]) {
+        !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
         log.trace('invalid storage-class header');
         monitoring.promMetrics('PUT', bucketName,
             errors.InvalidStorageClass.code, 'initiateMultipartUpload');
```

```diff
@@ -508,9 +508,8 @@ function multiObjectDelete(authInfo, request, log, callback) {
             if (bucketShield(bucketMD, 'objectDelete')) {
                 return next(errors.NoSuchBucket);
             }
-            // The implicit deny flag is ignored in the DeleteObjects API, as authorization only
-            // affects the objects.
-            if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
+            if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request,
+                request.actionImplicitDenies)) {
                 log.trace("access denied due to bucket acl's");
                 // if access denied at the bucket level, no access for
                 // any of the objects so all results will be error results
```

```diff
@@ -249,7 +249,7 @@ function objectCopy(authInfo, request, sourceBucket,
     const responseHeaders = {};
 
     if (request.headers['x-amz-storage-class'] &&
-        !config.locationConstraints[request.headers['x-amz-storage-class']]) {
+        !constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
         log.trace('invalid storage-class header');
         monitoring.promMetrics('PUT', destBucketName,
             errors.InvalidStorageClass.code, 'copyObject');
```

```diff
@@ -3,7 +3,6 @@ const { errors, versioning } = require('arsenal');
 
 const constants = require('../../constants');
 const aclUtils = require('../utilities/aclUtils');
-const { config } = require('../Config');
 const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
 const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
 const collectCorsHeaders = require('../utilities/collectCorsHeaders');
```

```diff
@@ -72,7 +71,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
         query,
     } = request;
     if (headers['x-amz-storage-class'] &&
-        !config.locationConstraints[headers['x-amz-storage-class']]) {
+        !constants.validStorageClasses.includes(headers['x-amz-storage-class'])) {
         log.trace('invalid storage-class header');
         monitoring.promMetrics('PUT', request.bucketName,
             errors.InvalidStorageClass.code, 'putObject');
```

```diff
@@ -1,3 +1,4 @@
+const vaultclient = require('vaultclient');
 const { auth } = require('arsenal');
 
 const { config } = require('../Config');
```

@@ -20,7 +21,6 @@ function getVaultClient(config) {
             port,
             https: true,
         });
-        const vaultclient = require('vaultclient');
         vaultClient = new vaultclient.Client(host, port, true, key, cert, ca);
     } else {
         logger.info('vaultclient configuration', {
@@ -28,7 +28,6 @@ function getVaultClient(config) {
             port,
             https: false,
         });
-        const vaultclient = require('vaultclient');
         vaultClient = new vaultclient.Client(host, port);
     }

@@ -50,6 +49,10 @@ function getMemBackend(config) {
 }

 switch (config.backends.auth) {
+case 'mem':
+    implName = 'vaultMem';
+    client = getMemBackend(config);
+    break;
 case 'multiple':
     implName = 'vaultChain';
     client = new ChainBackend('s3', [
@@ -57,14 +60,9 @@ case 'multiple':
         getVaultClient(config),
     ]);
     break;
-case 'vault':
+default: // vault
     implName = 'vault';
     client = getVaultClient(config);
-    break;
-default: // mem
-    implName = 'vaultMem';
-    client = getMemBackend(config);
-    break;
 }

 module.exports = new Vault(client, implName);
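
Net effect of the two switch hunks above: `'mem'` becomes an explicit case, and any unrecognized `config.backends.auth` value now falls through to the Vault client instead of the in-memory backend. A minimal sketch of the resulting selection (editor's illustration, not the actual module):

```js
// Sketch of the selection logic after the change.
function selectAuthBackend(name) {
    switch (name) {
    case 'mem': return 'vaultMem'; // in-memory backend, mostly for tests
    case 'multiple': return 'vaultChain'; // ChainBackend over memory + Vault
    default: return 'vault'; // 'vault' and every other value
    }
}
```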
@@ -8,6 +8,20 @@ const inMemory = require('./in_memory/backend').backend;
 const file = require('./file/backend');
 const KMIPClient = require('arsenal').network.kmipClient;
 const Common = require('./common');
+let scalityKMS;
+let scalityKMSImpl;
+try {
+    // eslint-disable-next-line import/no-unresolved
+    const ScalityKMS = require('scality-kms');
+    scalityKMS = new ScalityKMS(config.kms);
+    scalityKMSImpl = 'scalityKms';
+} catch (error) {
+    logger.warn('scality kms unavailable. ' +
+        'Using file kms backend unless mem specified.',
+        { error });
+    scalityKMS = file;
+    scalityKMSImpl = 'fileKms';
+}

 let client;
 let implName;
@@ -19,9 +33,8 @@ if (config.backends.kms === 'mem') {
     client = file;
     implName = 'fileKms';
 } else if (config.backends.kms === 'scality') {
-    const ScalityKMS = require('scality-kms');
-    client = new ScalityKMS(config.kms);
-    implName = 'scalityKms';
+    client = scalityKMS;
+    implName = scalityKMSImpl;
 } else if (config.backends.kms === 'kmip') {
     const kmipConfig = { kmip: config.kmip };
     if (!kmipConfig.kmip) {
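
The two KMS hunks above move the optional `scality-kms` dependency to a guarded require at module load, so a missing package degrades to the file backend instead of throwing when `config.backends.kms` is `'scality'`. A minimal sketch of the same pattern (module names hypothetical):

```js
// Guarded optional dependency: use the proprietary module when installed,
// otherwise fall back to a bundled implementation.
let client;
let implName;
try {
    // eslint-disable-next-line import/no-unresolved
    const Optional = require('optional-kms-module'); // hypothetical package
    client = new Optional();
    implName = 'optional';
} catch (error) {
    client = require('./file/backend'); // same fallback the hunk uses
    implName = 'fileKms';
}
```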
@@ -0,0 +1,131 @@
+/**
+ * Target service that should handle a message
+ * @readonly
+ * @enum {number}
+ */
+const MessageType = {
+    /** Message that contains a configuration overlay */
+    CONFIG_OVERLAY_MESSAGE: 1,
+    /** Message that requests a metrics report */
+    METRICS_REQUEST_MESSAGE: 2,
+    /** Message that contains a metrics report */
+    METRICS_REPORT_MESSAGE: 3,
+    /** Close the virtual TCP socket associated to the channel */
+    CHANNEL_CLOSE_MESSAGE: 4,
+    /** Write data to the virtual TCP socket associated to the channel */
+    CHANNEL_PAYLOAD_MESSAGE: 5,
+};
+
+/**
+ * Target service that should handle a message
+ * @readonly
+ * @enum {number}
+ */
+const TargetType = {
+    /** Let the dispatcher choose the most appropriate message */
+    TARGET_ANY: 0,
+};
+
+const headerSize = 3;
+
+class ChannelMessageV0 {
+    /**
+     * @param {Buffer} buffer Message bytes
+     */
+    constructor(buffer) {
+        this.messageType = buffer.readUInt8(0);
+        this.channelNumber = buffer.readUInt8(1);
+        this.target = buffer.readUInt8(2);
+        this.payload = buffer.slice(headerSize);
+    }
+
+    /**
+     * @returns {number} Message type
+     */
+    getType() {
+        return this.messageType;
+    }
+
+    /**
+     * @returns {number} Channel number if applicable
+     */
+    getChannelNumber() {
+        return this.channelNumber;
+    }
+
+    /**
+     * @returns {number} Target service, or 0 to choose automatically
+     */
+    getTarget() {
+        return this.target;
+    }
+
+    /**
+     * @returns {Buffer} Message payload if applicable
+     */
+    getPayload() {
+        return this.payload;
+    }
+
+    /**
+     * Creates a wire representation of a channel close message
+     *
+     * @param {number} channelId Channel number
+     *
+     * @returns {Buffer} wire representation
+     */
+    static encodeChannelCloseMessage(channelId) {
+        const buf = Buffer.alloc(headerSize);
+        buf.writeUInt8(MessageType.CHANNEL_CLOSE_MESSAGE, 0);
+        buf.writeUInt8(channelId, 1);
+        buf.writeUInt8(TargetType.TARGET_ANY, 2);
+        return buf;
+    }
+
+    /**
+     * Creates a wire representation of a channel data message
+     *
+     * @param {number} channelId Channel number
+     * @param {Buffer} data Payload
+     *
+     * @returns {Buffer} wire representation
+     */
+    static encodeChannelDataMessage(channelId, data) {
+        const buf = Buffer.alloc(data.length + headerSize);
+        buf.writeUInt8(MessageType.CHANNEL_PAYLOAD_MESSAGE, 0);
+        buf.writeUInt8(channelId, 1);
+        buf.writeUInt8(TargetType.TARGET_ANY, 2);
+        data.copy(buf, headerSize);
+        return buf;
+    }
+
+    /**
+     * Creates a wire representation of a metrics message
+     *
+     * @param {object} body Metrics report
+     *
+     * @returns {Buffer} wire representation
+     */
+    static encodeMetricsReportMessage(body) {
+        const report = JSON.stringify(body);
+        const buf = Buffer.alloc(report.length + headerSize);
+        buf.writeUInt8(MessageType.METRICS_REPORT_MESSAGE, 0);
+        buf.writeUInt8(0, 1);
+        buf.writeUInt8(TargetType.TARGET_ANY, 2);
+        buf.write(report, headerSize);
+        return buf;
+    }
+
+    /**
+     * Protocol name used for subprotocol negociation
+     */
+    static get protocolName() {
+        return 'zenko-secure-channel-v0';
+    }
+}
+
+module.exports = {
+    ChannelMessageV0,
+    MessageType,
+    TargetType,
+};
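
A quick round-trip sketch of the 3-byte framing defined above (editor's illustration, runnable with Node's `Buffer`):

```js
const { ChannelMessageV0, MessageType } = require('./ChannelMessageV0');

// Encode a payload for channel 7, then parse it back.
const wire = ChannelMessageV0.encodeChannelDataMessage(
    7, Buffer.from('GET / HTTP/1.1\r\n'));
const msg = new ChannelMessageV0(wire);

console.log(msg.getType() === MessageType.CHANNEL_PAYLOAD_MESSAGE); // true
console.log(msg.getChannelNumber()); // 7
console.log(msg.getPayload().toString()); // 'GET / HTTP/1.1\r\n'
```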
@@ -0,0 +1,94 @@
+const WebSocket = require('ws');
+const arsenal = require('arsenal');
+
+const logger = require('../utilities/logger');
+const _config = require('../Config').config;
+const { patchConfiguration } = require('./configuration');
+const { reshapeExceptionError } = arsenal.errorUtils;
+
+
+const managementAgentMessageType = {
+    /** Message that contains the loaded overlay */
+    NEW_OVERLAY: 1,
+};
+
+const CONNECTION_RETRY_TIMEOUT_MS = 5000;
+
+
+function initManagementClient() {
+    const { host, port } = _config.managementAgent;
+
+    const ws = new WebSocket(`ws://${host}:${port}/watch`);
+
+    ws.on('open', () => {
+        logger.info('connected with management agent');
+    });
+
+    ws.on('close', (code, reason) => {
+        logger.info('disconnected from management agent', { reason });
+        setTimeout(initManagementClient, CONNECTION_RETRY_TIMEOUT_MS);
+    });
+
+    ws.on('error', error => {
+        logger.error('error on connection with management agent', { error });
+    });
+
+    ws.on('message', data => {
+        const method = 'initManagementclient::onMessage';
+        const log = logger.newRequestLogger();
+        let msg;
+
+        if (!data) {
+            log.error('message without data', { method });
+            return;
+        }
+        try {
+            msg = JSON.parse(data);
+        } catch (err) {
+            log.error('data is an invalid json', { method, err, data });
+            return;
+        }
+
+        if (msg.payload === undefined) {
+            log.error('message without payload', { method });
+            return;
+        }
+        if (typeof msg.messageType !== 'number') {
+            log.error('messageType is not an integer', {
+                type: typeof msg.messageType,
+                method,
+            });
+            return;
+        }
+
+        switch (msg.messageType) {
+        case managementAgentMessageType.NEW_OVERLAY:
+            patchConfiguration(msg.payload, log, err => {
+                if (err) {
+                    log.error('failed to patch overlay', {
+                        error: reshapeExceptionError(err),
+                        method,
+                    });
+                }
+            });
+            return;
+        default:
+            log.error('new overlay message with unmanaged message type', {
+                method,
+                type: msg.messageType,
+            });
+            return;
+        }
+    });
+}
+
+function isManagementAgentUsed() {
+    return process.env.MANAGEMENT_USE_AGENT === '1';
+}
+
+
+module.exports = {
+    managementAgentMessageType,
+    initManagementClient,
+    isManagementAgentUsed,
+};
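
The watch-socket protocol above is plain JSON. A message the agent client accepts would look like this (sketch; overlay fields depend on the deployment):

```js
// Shape of a message pushed on ws://<host>:<port>/watch.
// messageType 1 is managementAgentMessageType.NEW_OVERLAY.
const msg = {
    messageType: 1,
    payload: { version: 3 /* , locations, users, endpoints... */ },
};
// ws.send(JSON.stringify(msg)) on the agent side triggers patchConfiguration().
```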
@@ -0,0 +1,240 @@
+const arsenal = require('arsenal');
+
+const { buildAuthDataAccount } = require('../auth/in_memory/builder');
+const _config = require('../Config').config;
+const metadata = require('../metadata/wrapper');
+
+const { getStoredCredentials } = require('./credentials');
+
+const latestOverlayVersionKey = 'configuration/overlay-version';
+const managementDatabaseName = 'PENSIEVE';
+const replicatorEndpoint = 'zenko-cloudserver-replicator';
+const { decryptSecret } = arsenal.pensieve.credentialUtils;
+const { patchLocations } = arsenal.patches.locationConstraints;
+const { reshapeExceptionError } = arsenal.errorUtils;
+const { replicationBackends } = require('arsenal').constants;
+
+function overlayHasVersion(overlay) {
+    return overlay && overlay.version !== undefined;
+}
+
+function remoteOverlayIsNewer(cachedOverlay, remoteOverlay) {
+    return (overlayHasVersion(remoteOverlay) &&
+            (!overlayHasVersion(cachedOverlay) ||
+             remoteOverlay.version > cachedOverlay.version));
+}
+
+/**
+ * Updates the live {Config} object with the new overlay configuration.
+ *
+ * No-op if this version was already applied to the live {Config}.
+ *
+ * @param {object} newConf Overlay configuration to apply
+ * @param {werelogs~Logger} log Request-scoped logger
+ * @param {function} cb Function to call with (error, newConf)
+ *
+ * @returns {undefined}
+ */
+function patchConfiguration(newConf, log, cb) {
+    if (newConf.version === undefined) {
+        log.debug('no remote configuration created yet');
+        return process.nextTick(cb, null, newConf);
+    }
+
+    if (_config.overlayVersion !== undefined &&
+        newConf.version <= _config.overlayVersion) {
+        log.debug('configuration version already applied',
+            { configurationVersion: newConf.version });
+        return process.nextTick(cb, null, newConf);
+    }
+    return getStoredCredentials(log, (err, creds) => {
+        if (err) {
+            return cb(err);
+        }
+        const accounts = [];
+        if (newConf.users) {
+            newConf.users.forEach(u => {
+                if (u.secretKey && u.secretKey.length > 0) {
+                    const secretKey = decryptSecret(creds, u.secretKey);
+                    // accountType will be service-replication or service-clueso
+                    let serviceName;
+                    if (u.accountType && u.accountType.startsWith('service-')) {
+                        serviceName = u.accountType.split('-')[1];
+                    }
+                    const newAccount = buildAuthDataAccount(
+                        u.accessKey, secretKey, u.canonicalId, serviceName,
+                        u.userName);
+                    accounts.push(newAccount.accounts[0]);
+                }
+            });
+        }
+
+        const restEndpoints = Object.assign({}, _config.restEndpoints);
+        if (newConf.endpoints) {
+            newConf.endpoints.forEach(e => {
+                restEndpoints[e.hostname] = e.locationName;
+            });
+        }
+
+        if (!restEndpoints[replicatorEndpoint]) {
+            restEndpoints[replicatorEndpoint] = 'us-east-1';
+        }
+
+        const locations = patchLocations(newConf.locations, creds, log);
+        if (Object.keys(locations).length !== 0) {
+            try {
+                _config.setLocationConstraints(locations);
+            } catch (error) {
+                const exceptionError = reshapeExceptionError(error);
+                log.error('could not apply configuration version location ' +
+                    'constraints', { error: exceptionError,
+                        method: 'getStoredCredentials' });
+                return cb(exceptionError);
+            }
+            try {
+                const locationsWithReplicationBackend = Object.keys(locations)
+                // NOTE: In Orbit, we don't need to have Scality location in our
+                // replication endpoind config, since we do not replicate to
+                // any Scality Instance yet.
+                    .filter(key => replicationBackends
+                        [locations[key].type])
+                    .reduce((obj, key) => {
+                        /* eslint no-param-reassign:0 */
+                        obj[key] = locations[key];
+                        return obj;
+                    }, {});
+                _config.setReplicationEndpoints(
+                    locationsWithReplicationBackend);
+            } catch (error) {
+                const exceptionError = reshapeExceptionError(error);
+                log.error('could not apply replication endpoints',
+                    { error: exceptionError, method: 'getStoredCredentials' });
+                return cb(exceptionError);
+            }
+        }
+
+        _config.setAuthDataAccounts(accounts);
+        _config.setRestEndpoints(restEndpoints);
+        _config.setPublicInstanceId(newConf.instanceId);
+
+        if (newConf.browserAccess) {
+            if (Boolean(_config.browserAccessEnabled) !==
+                Boolean(newConf.browserAccess.enabled)) {
+                _config.browserAccessEnabled =
+                    Boolean(newConf.browserAccess.enabled);
+                _config.emit('browser-access-enabled-change');
+            }
+        }
+
+        _config.overlayVersion = newConf.version;
+
+        log.info('applied configuration version',
+            { configurationVersion: _config.overlayVersion });
+
+        return cb(null, newConf);
+    });
+}
+
+/**
+ * Writes configuration version to the management database
+ *
+ * @param {object} cachedOverlay Latest stored configuration version
+ *  for freshness comparison purposes
+ * @param {object} remoteOverlay New configuration version
+ * @param {werelogs~Logger} log Request-scoped logger
+ * @param {function} cb Function to call with (error, remoteOverlay)
+ *
+ * @returns {undefined}
+ */
+function saveConfigurationVersion(cachedOverlay, remoteOverlay, log, cb) {
+    if (remoteOverlayIsNewer(cachedOverlay, remoteOverlay)) {
+        const objName = `configuration/overlay/${remoteOverlay.version}`;
+        metadata.putObjectMD(managementDatabaseName, objName, remoteOverlay,
+            {}, log, error => {
+                if (error) {
+                    const exceptionError = reshapeExceptionError(error);
+                    log.error('could not save configuration',
+                        { error: exceptionError,
+                            method: 'saveConfigurationVersion',
+                            configurationVersion: remoteOverlay.version });
+                    cb(exceptionError);
+                    return;
+                }
+                metadata.putObjectMD(managementDatabaseName,
+                    latestOverlayVersionKey, remoteOverlay.version, {}, log,
+                    error => {
+                        if (error) {
+                            log.error('could not save configuration version', {
+                                configurationVersion: remoteOverlay.version,
+                            });
+                        }
+                        cb(error, remoteOverlay);
+                    });
+            });
+    } else {
+        log.debug('no remote configuration to cache yet');
+        process.nextTick(cb, null, remoteOverlay);
+    }
+}
+
+/**
+ * Loads the latest cached configuration overlay from the management
+ * database, without contacting the Orbit API.
+ *
+ * @param {werelogs~Logger} log Request-scoped logger
+ * @param {function} callback Function called with (error, cachedOverlay)
+ *
+ * @returns {undefined}
+ */
+function loadCachedOverlay(log, callback) {
+    return metadata.getObjectMD(managementDatabaseName,
+        latestOverlayVersionKey, {}, log, (err, version) => {
+            if (err) {
+                if (err.is.NoSuchKey) {
+                    return process.nextTick(callback, null, {});
+                }
+                return callback(err);
+            }
+            return metadata.getObjectMD(managementDatabaseName,
+                `configuration/overlay/${version}`, {}, log, (err, conf) => {
+                    if (err) {
+                        if (err.is.NoSuchKey) {
+                            return process.nextTick(callback, null, {});
+                        }
+                        return callback(err);
+                    }
+                    return callback(null, conf);
+                });
+        });
+}
+
+function applyAndSaveOverlay(overlay, log) {
+    patchConfiguration(overlay, log, err => {
+        if (err) {
+            log.error('could not apply pushed overlay', {
+                error: reshapeExceptionError(err),
+                method: 'applyAndSaveOverlay',
+            });
+            return;
+        }
+        saveConfigurationVersion(null, overlay, log, err => {
+            if (err) {
+                log.error('could not cache overlay version', {
+                    error: reshapeExceptionError(err),
+                    method: 'applyAndSaveOverlay',
+                });
+                return;
+            }
+            log.info('overlay push processed');
+        });
+    });
+}
+
+module.exports = {
+    loadCachedOverlay,
+    managementDatabaseName,
+    patchConfiguration,
+    saveConfigurationVersion,
+    remoteOverlayIsNewer,
+    applyAndSaveOverlay,
+};
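
`patchConfiguration` above only applies overlays with a strictly newer `version`. A minimal overlay that exercises the main branches might look like this (values illustrative, field names taken from the code):

```js
const overlay = {
    version: 2,
    instanceId: '6075357a-0000-0000-0000-000000000000', // hypothetical UUID
    users: [{
        accessKey: 'AKIAEXAMPLE', // hypothetical
        secretKey: '<encrypted>', // decrypted via decryptSecret(creds, ...)
        canonicalId: '...',
        accountType: 'service-replication', // yields serviceName 'replication'
        userName: 'replication-svc',
    }],
    endpoints: [{ hostname: 's3.example.com', locationName: 'us-east-1' }],
    browserAccess: { enabled: true },
    // locations: { ... } -- run through arsenal's patchLocations()
};
// patchConfiguration(overlay, log, (err, conf) => { /* ... */ });
```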
@@ -0,0 +1,145 @@
+const arsenal = require('arsenal');
+const forge = require('node-forge');
+const request = require('../utilities/request');
+
+const metadata = require('../metadata/wrapper');
+
+const managementDatabaseName = 'PENSIEVE';
+const tokenConfigurationKey = 'auth/zenko/remote-management-token';
+const tokenRotationDelay = 3600 * 24 * 7 * 1000; // 7 days
+const { reshapeExceptionError } = arsenal.errorUtils;
+
+/**
+ * Retrieves Orbit API token from the management database.
+ *
+ * The token is used to authenticate stat posting and
+ *
+ * @param {werelogs~Logger} log Request-scoped logger to be able to trace
+ *  initialization process
+ * @param {function} callback Function called with (error, result)
+ *
+ * @returns {undefined}
+ */
+function getStoredCredentials(log, callback) {
+    metadata.getObjectMD(managementDatabaseName, tokenConfigurationKey, {},
+        log, callback);
+}
+
+function issueCredentials(managementEndpoint, instanceId, log, callback) {
+    log.info('registering with API to get token');
+
+    const keyPair = forge.pki.rsa.generateKeyPair({ bits: 2048, e: 0x10001 });
+    const privateKey = forge.pki.privateKeyToPem(keyPair.privateKey);
+    const publicKey = forge.pki.publicKeyToPem(keyPair.publicKey);
+
+    const postData = {
+        publicKey,
+    };
+
+    request.post(`${managementEndpoint}/${instanceId}/register`,
+        { body: postData, json: true }, (error, response, body) => {
+            if (error) {
+                return callback(error);
+            }
+            if (response.statusCode !== 201) {
+                log.error('could not register instance', {
+                    statusCode: response.statusCode,
+                });
+                return callback(arsenal.errors.InternalError);
+            }
+            /* eslint-disable no-param-reassign */
+            body.privateKey = privateKey;
+            /* eslint-enable no-param-reassign */
+            return callback(null, body);
+        });
+}
+
+function confirmInstanceCredentials(
+    managementEndpoint, instanceId, creds, log, callback) {
+    const postData = {
+        serial: creds.serial || 0,
+        publicKey: creds.publicKey,
+    };
+
+    const opts = {
+        headers: {
+            'x-instance-authentication-token': creds.token,
+        },
+        body: postData,
+    };
+
+    request.post(`${managementEndpoint}/${instanceId}/confirm`,
+        opts, (error, response) => {
+            if (error) {
+                return callback(error);
+            }
+            if (response.statusCode === 200) {
+                return callback(null, instanceId, creds.token);
+            }
+            return callback(arsenal.errors.InternalError);
+        });
+}
+
+/**
+ * Initializes credentials and PKI in the management database.
+ *
+ * In case the management database is new and empty, the instance
+ * is registered as new against the Orbit API with newly-generated
+ * RSA key pair.
+ *
+ * @param {string} managementEndpoint API endpoint
+ * @param {string} instanceId UUID of this deployment
+ * @param {werelogs~Logger} log Request-scoped logger to be able to trace
+ *  initialization process
+ * @param {function} callback Function called with (error, result)
+ *
+ * @returns {undefined}
+ */
+function initManagementCredentials(
+    managementEndpoint, instanceId, log, callback) {
+    getStoredCredentials(log, (error, value) => {
+        if (error) {
+            if (error.is.NoSuchKey) {
+                return issueCredentials(managementEndpoint, instanceId, log,
+                    (error, value) => {
+                        if (error) {
+                            log.error('could not issue token',
+                                { error: reshapeExceptionError(error),
+                                    method: 'initManagementCredentials' });
+                            return callback(error);
+                        }
+                        log.debug('saving token');
+                        return metadata.putObjectMD(managementDatabaseName,
+                            tokenConfigurationKey, value, {}, log, error => {
+                                if (error) {
+                                    log.error('could not save token',
+                                        { error: reshapeExceptionError(error),
+                                            method: 'initManagementCredentials',
+                                        });
+                                    return callback(error);
+                                }
+                                log.info('saved token locally, ' +
+                                    'confirming instance');
+                                return confirmInstanceCredentials(
+                                    managementEndpoint, instanceId, value, log,
+                                    callback);
+                            });
+                    });
+            }
+            log.debug('could not get token', { error });
+            return callback(error);
+        }
+
+        log.info('returning existing token');
+        if (Date.now() - value.issueDate > tokenRotationDelay) {
+            log.warn('management API token is too old, should re-issue');
+        }
+
+        return callback(null, instanceId, value.token);
+    });
+}
+
+module.exports = {
+    getStoredCredentials,
+    initManagementCredentials,
+};
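
For reference, the stored credential object above carries at least `token`, `serial`, `publicKey`, `privateKey` and `issueDate`; the rotation check is a plain age comparison (sketch, constant from the code):

```js
const tokenRotationDelay = 3600 * 24 * 7 * 1000; // 7 days, as defined above
const isTokenStale = creds => Date.now() - creds.issueDate > tokenRotationDelay;
// A stale token currently only logs a warning ('should re-issue');
// no automatic re-issue happens here.
```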
@@ -0,0 +1,138 @@
+const arsenal = require('arsenal');
+const async = require('async');
+
+const metadata = require('../metadata/wrapper');
+const logger = require('../utilities/logger');
+
+const {
+    loadCachedOverlay,
+    managementDatabaseName,
+    patchConfiguration,
+} = require('./configuration');
+const { initManagementCredentials } = require('./credentials');
+const { startWSManagementClient } = require('./push');
+const { startPollingManagementClient } = require('./poll');
+const { reshapeExceptionError } = arsenal.errorUtils;
+const { isManagementAgentUsed } = require('./agentClient');
+
+const initRemoteManagementRetryDelay = 10000;
+
+const managementEndpointRoot =
+    process.env.MANAGEMENT_ENDPOINT ||
+    'https://api.zenko.io';
+const managementEndpoint = `${managementEndpointRoot}/api/v1/instance`;
+
+const pushEndpointRoot =
+    process.env.PUSH_ENDPOINT ||
+    'https://push.api.zenko.io';
+const pushEndpoint = `${pushEndpointRoot}/api/v1/instance`;
+
+function initManagementDatabase(log, callback) {
+    // XXX choose proper owner names
+    const md = new arsenal.models.BucketInfo(managementDatabaseName, 'owner',
+        'owner display name', new Date().toJSON());
+
+    metadata.createBucket(managementDatabaseName, md, log, error => {
+        if (error) {
+            if (error.is.BucketAlreadyExists) {
+                log.info('created management database');
+                return callback();
+            }
+            log.error('could not initialize management database',
+                { error: reshapeExceptionError(error),
+                    method: 'initManagementDatabase' });
+            return callback(error);
+        }
+        log.info('initialized management database');
+        return callback();
+    });
+}
+
+function startManagementListeners(instanceId, token) {
+    const mode = process.env.MANAGEMENT_MODE || 'push';
+    if (mode === 'push') {
+        const url = `${pushEndpoint}/${instanceId}/ws`;
+        startWSManagementClient(url, token);
+    } else {
+        startPollingManagementClient(managementEndpoint, instanceId, token);
+    }
+}
+
+/**
+ * Initializes Orbit-based management by:
+ * - creating the management database in metadata
+ * - generating a key pair for credentials encryption
+ * - generating an instance-unique ID
+ * - getting an authentication token for the API
+ * - loading and applying the latest cached overlay configuration
+ * - starting a configuration update and metrics push background task
+ *
+ * @param {werelogs~Logger} log Request-scoped logger to be able to trace
+ *  initialization process
+ * @param {function} callback Function to call once the overlay is loaded
+ *  (overlay)
+ *
+ * @returns {undefined}
+ */
+function initManagement(log, callback) {
+    if ((process.env.REMOTE_MANAGEMENT_DISABLE &&
+        process.env.REMOTE_MANAGEMENT_DISABLE !== '0')
+        || process.env.S3BACKEND === 'mem') {
+        log.info('remote management disabled');
+        return;
+    }
+
+    /* Temporary check before to fully move to the process management agent. */
+    if (isManagementAgentUsed() ^ typeof callback === 'function') {
+        let msg = 'misuse of initManagement function: ';
+        msg += `MANAGEMENT_USE_AGENT: ${process.env.MANAGEMENT_USE_AGENT}`;
+        msg += `, callback type: ${typeof callback}`;
+        throw new Error(msg);
+    }
+
+    async.waterfall([
+        // eslint-disable-next-line arrow-body-style
+        cb => { return isManagementAgentUsed() ? metadata.setup(cb) : cb(); },
+        cb => initManagementDatabase(log, cb),
+        cb => metadata.getUUID(log, cb),
+        (instanceId, cb) => initManagementCredentials(
+            managementEndpoint, instanceId, log, cb),
+        (instanceId, token, cb) => {
+            if (!isManagementAgentUsed()) {
+                cb(null, instanceId, token, {});
+                return;
+            }
+            loadCachedOverlay(log, (err, overlay) => cb(err, instanceId,
+                token, overlay));
+        },
+        (instanceId, token, overlay, cb) => {
+            if (!isManagementAgentUsed()) {
+                cb(null, instanceId, token, overlay);
+                return;
+            }
+            patchConfiguration(overlay, log,
+                err => cb(err, instanceId, token, overlay));
+        },
+    ], (error, instanceId, token, overlay) => {
+        if (error) {
+            log.error('could not initialize remote management, retrying later',
+                { error: reshapeExceptionError(error),
+                    method: 'initManagement' });
+            setTimeout(initManagement,
+                initRemoteManagementRetryDelay,
+                logger.newRequestLogger());
+        } else {
+            log.info(`this deployment's Instance ID is ${instanceId}`);
+            log.end('management init done');
+            startManagementListeners(instanceId, token);
+            if (callback) {
+                callback(overlay);
+            }
+        }
+    });
+}
+
+module.exports = {
+    initManagement,
+    initManagementDatabase,
+};
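
All the knobs the module above reads come from the environment: `REMOTE_MANAGEMENT_DISABLE` (or `S3BACKEND=mem`) skips management entirely, `MANAGEMENT_USE_AGENT=1` delegates to the agent process, `MANAGEMENT_MODE=poll` selects polling over the push channel, and `MANAGEMENT_ENDPOINT`/`PUSH_ENDPOINT` override the Orbit URLs. A minimal invocation sketch (paths as used from the repo root):

```js
const { initManagement } = require('./lib/management');
const logger = require('./lib/utilities/logger');

// Without MANAGEMENT_USE_AGENT=1, initManagement is fire-and-forget: the
// XOR guard above throws if a callback is passed in that mode.
initManagement(logger.newRequestLogger());
```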
@@ -0,0 +1,157 @@
+const arsenal = require('arsenal');
+const async = require('async');
+const request = require('../utilities/request');
+
+const _config = require('../Config').config;
+const logger = require('../utilities/logger');
+const metadata = require('../metadata/wrapper');
+const {
+    loadCachedOverlay,
+    patchConfiguration,
+    saveConfigurationVersion,
+} = require('./configuration');
+const { reshapeExceptionError } = arsenal.errorUtils;
+
+const pushReportDelay = 30000;
+const pullConfigurationOverlayDelay = 60000;
+
+function loadRemoteOverlay(
+    managementEndpoint, instanceId, remoteToken, cachedOverlay, log, cb) {
+    log.debug('loading remote overlay');
+    const opts = {
+        headers: {
+            'x-instance-authentication-token': remoteToken,
+            'x-scal-request-id': log.getSerializedUids(),
+        },
+        json: true,
+    };
+    request.get(`${managementEndpoint}/${instanceId}/config/overlay`, opts,
+        (error, response, body) => {
+            if (error) {
+                return cb(error);
+            }
+            if (response.statusCode === 200) {
+                return cb(null, cachedOverlay, body);
+            }
+            if (response.statusCode === 404) {
+                return cb(null, cachedOverlay, {});
+            }
+            return cb(arsenal.errors.AccessForbidden, cachedOverlay, {});
+        });
+}
+
+// TODO save only after successful patch
+function applyConfigurationOverlay(
+    managementEndpoint, instanceId, remoteToken, log) {
+    async.waterfall([
+        wcb => loadCachedOverlay(log, wcb),
+        (cachedOverlay, wcb) => patchConfiguration(cachedOverlay,
+            log, wcb),
+        (cachedOverlay, wcb) =>
+            loadRemoteOverlay(managementEndpoint, instanceId, remoteToken,
+                cachedOverlay, log, wcb),
+        (cachedOverlay, remoteOverlay, wcb) =>
+            saveConfigurationVersion(cachedOverlay, remoteOverlay, log, wcb),
+        (remoteOverlay, wcb) => patchConfiguration(remoteOverlay,
+            log, wcb),
+    ], error => {
+        if (error) {
+            log.error('could not apply managed configuration',
+                { error: reshapeExceptionError(error),
+                    method: 'applyConfigurationOverlay' });
+        }
+        setTimeout(applyConfigurationOverlay, pullConfigurationOverlayDelay,
+            managementEndpoint, instanceId, remoteToken,
+            logger.newRequestLogger());
+    });
+}
+
+function postStats(managementEndpoint, instanceId, remoteToken, report, next) {
+    const toURL = `${managementEndpoint}/${instanceId}/stats`;
+    const toOptions = {
+        json: true,
+        headers: {
+            'content-type': 'application/json',
+            'x-instance-authentication-token': remoteToken,
+        },
+        body: report,
+    };
+    const toCallback = (err, response, body) => {
+        if (err) {
+            logger.info('could not post stats', { error: err });
+        }
+        if (response && response.statusCode !== 201) {
+            logger.info('could not post stats', {
+                body,
+                statusCode: response.statusCode,
+            });
+        }
+        if (next) {
+            next(null, instanceId, remoteToken);
+        }
+    };
+    return request.post(toURL, toOptions, toCallback);
+}
+
+function getStats(next) {
+    const fromURL = `http://localhost:${_config.port}/_/report`;
+    const fromOptions = {
+        headers: {
+            'x-scal-report-token': process.env.REPORT_TOKEN,
+        },
+    };
+    return request.get(fromURL, fromOptions, next);
+}
+
+function pushStats(managementEndpoint, instanceId, remoteToken, next) {
+    if (process.env.PUSH_STATS === 'false') {
+        return;
+    }
+
+    getStats((err, res, report) => {
+        if (err) {
+            logger.info('could not retrieve stats', { error: err });
+            return;
+        }
+
+        logger.debug('report', { report });
+        postStats(
+            managementEndpoint,
+            instanceId,
+            remoteToken,
+            report,
+            next
+        );
+        return;
+    });
+
+    setTimeout(pushStats, pushReportDelay,
+        managementEndpoint, instanceId, remoteToken);
+}
+
+/**
+ * Starts background task that updates configuration and pushes stats.
+ *
+ * Periodically polls for configuration updates, and pushes stats at
+ * a fixed interval.
+ *
+ * @param {string} managementEndpoint API endpoint
+ * @param {string} instanceId UUID of this deployment
+ * @param {string} remoteToken API authentication token
+ *
+ * @returns {undefined}
+ */
+function startPollingManagementClient(
+    managementEndpoint, instanceId, remoteToken) {
+    metadata.notifyBucketChange(() => {
+        pushStats(managementEndpoint, instanceId, remoteToken);
+    });
+
+    pushStats(managementEndpoint, instanceId, remoteToken);
+    applyConfigurationOverlay(managementEndpoint, instanceId, remoteToken,
+        logger.newRequestLogger());
+}
+
+module.exports = {
+    startPollingManagementClient,
+};
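
Timing of the polling mode above, for reference (constants from the code): stats are pushed every 30 seconds and on bucket-change notifications, while the configuration overlay is re-pulled every 60 seconds whether or not the previous application succeeded.

```js
// Cadence constants used by the two loops above:
const pushReportDelay = 30000; // pushStats re-arms itself every 30s
const pullConfigurationOverlayDelay = 60000; // overlay pull, 60s after each attempt
```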
@@ -0,0 +1,301 @@
+const arsenal = require('arsenal');
+const HttpsProxyAgent = require('https-proxy-agent');
+const net = require('net');
+const request = require('../utilities/request');
+const { URL } = require('url');
+const WebSocket = require('ws');
+const assert = require('assert');
+const http = require('http');
+
+const _config = require('../Config').config;
+const logger = require('../utilities/logger');
+const metadata = require('../metadata/wrapper');
+
+const { reshapeExceptionError } = arsenal.errorUtils;
+const { isManagementAgentUsed } = require('./agentClient');
+const { applyAndSaveOverlay } = require('./configuration');
+const {
+    ChannelMessageV0,
+    MessageType,
+} = require('./ChannelMessageV0');
+
+const {
+    CONFIG_OVERLAY_MESSAGE,
+    METRICS_REQUEST_MESSAGE,
+    CHANNEL_CLOSE_MESSAGE,
+    CHANNEL_PAYLOAD_MESSAGE,
+} = MessageType;
+
+const PING_INTERVAL_MS = 10000;
+const subprotocols = [ChannelMessageV0.protocolName];
+
+const cloudServerHost = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_HOST
+    || 'localhost';
+const cloudServerPort = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_PORT
+    || _config.port;
+
+let overlayMessageListener = null;
+let connected = false;
+
+// No wildcard nor cidr/mask match for now
+function createWSAgent(pushEndpoint, env, log) {
+    const url = new URL(pushEndpoint);
+    const noProxy = (env.NO_PROXY || env.no_proxy
+        || '').split(',');
+
+    if (noProxy.includes(url.hostname)) {
+        log.info('push server ws has proxy exclusion', { noProxy });
+        return null;
+    }
+
+    if (url.protocol === 'https:' || url.protocol === 'wss:') {
+        const httpsProxy = (env.HTTPS_PROXY || env.https_proxy);
+        if (httpsProxy) {
+            log.info('push server ws using https proxy', { httpsProxy });
+            return new HttpsProxyAgent(httpsProxy);
+        }
+    } else if (url.protocol === 'http:' || url.protocol === 'ws:') {
+        const httpProxy = (env.HTTP_PROXY || env.http_proxy);
+        if (httpProxy) {
+            log.info('push server ws using http proxy', { httpProxy });
+            return new HttpsProxyAgent(httpProxy);
+        }
+    }
+
+    const allProxy = (env.ALL_PROXY || env.all_proxy);
+    if (allProxy) {
+        log.info('push server ws using wildcard proxy', { allProxy });
+        return new HttpsProxyAgent(allProxy);
+    }
+
+    log.info('push server ws not using proxy');
+    return null;
+}
+
+/**
+ * Starts background task that updates configuration and pushes stats.
+ *
+ * Receives pushed Websocket messages on configuration updates, and
+ * sends stat messages in response to API sollicitations.
+ *
+ * @param {string} url API endpoint
+ * @param {string} token API authentication token
+ * @param {function} cb end-of-connection callback
+ *
+ * @returns {undefined}
+ */
+function startWSManagementClient(url, token, cb) {
+    logger.info('connecting to push server', { url });
+    function _logError(error, errorMessage, method) {
+        if (error) {
+            logger.error(`management client error: ${errorMessage}`,
+                { error: reshapeExceptionError(error), method });
+        }
+    }
+
+    const socketsByChannelId = [];
+    const headers = {
+        'x-instance-authentication-token': token,
+    };
+    const agent = createWSAgent(url, process.env, logger);
+
+    const ws = new WebSocket(url, subprotocols, { headers, agent });
+    let pingTimeout = null;
+
+    function sendPing() {
+        if (ws.readyState === ws.OPEN) {
+            ws.ping(err => _logError(err, 'failed to send a ping', 'sendPing'));
+        }
+        pingTimeout = setTimeout(() => ws.terminate(), PING_INTERVAL_MS);
+    }
+
+    function initiatePing() {
+        clearTimeout(pingTimeout);
+        setTimeout(sendPing, PING_INTERVAL_MS);
+    }
+
+    function pushStats(options) {
+        if (process.env.PUSH_STATS === 'false') {
+            return;
+        }
+        const fromURL = `http://${cloudServerHost}:${cloudServerPort}/_/report`;
+        const fromOptions = {
+            json: true,
+            headers: {
+                'x-scal-report-token': process.env.REPORT_TOKEN,
+                'x-scal-report-skip-cache': Boolean(options && options.noCache),
+            },
+        };
+        request.get(fromURL, fromOptions, (err, response, body) => {
+            if (err) {
+                _logError(err, 'failed to get metrics report', 'pushStats');
+                return;
+            }
+            ws.send(ChannelMessageV0.encodeMetricsReportMessage(body),
+                err => _logError(err, 'failed to send metrics report message',
+                    'pushStats'));
+        });
+    }
+
+    function closeChannel(channelId) {
+        const socket = socketsByChannelId[channelId];
+        if (socket) {
+            socket.destroy();
+            delete socketsByChannelId[channelId];
+        }
+    }
+
+    function receiveChannelData(channelId, payload) {
+        let socket = socketsByChannelId[channelId];
+        if (!socket) {
+            socket = net.createConnection(cloudServerPort, cloudServerHost);
+
+            socket.on('data', data => {
+                ws.send(ChannelMessageV0.
+                    encodeChannelDataMessage(channelId, data), err =>
+                    _logError(err, 'failed to send channel data message',
+                        'receiveChannelData'));
+            });
+
+            socket.on('connect', () => {
+            });
+
+            socket.on('drain', () => {
+            });
+
+            socket.on('error', error => {
+                logger.error('failed to connect to S3', {
+                    code: error.code,
+                    host: error.address,
+                    port: error.port,
+                });
+            });
+
+            socket.on('end', () => {
+                socket.destroy();
+                socketsByChannelId[channelId] = null;
+                ws.send(ChannelMessageV0.encodeChannelCloseMessage(channelId),
+                    err => _logError(err,
+                        'failed to send channel close message',
+                        'receiveChannelData'));
+            });
+
+            socketsByChannelId[channelId] = socket;
+        }
+        socket.write(payload);
+    }
+
+    function browserAccessChangeHandler() {
+        if (!_config.browserAccessEnabled) {
+            socketsByChannelId.forEach(s => s.close());
+        }
+    }
+
+    ws.on('open', () => {
+        connected = true;
+        logger.info('connected to push server');
+
+        metadata.notifyBucketChange(() => {
+            pushStats({ noCache: true });
+        });
+        _config.on('browser-access-enabled-change', browserAccessChangeHandler);
+
+        initiatePing();
+    });
+
+    const cbOnce = cb ? arsenal.jsutil.once(cb) : null;
+
+    ws.on('close', () => {
+        logger.info('disconnected from push server, reconnecting in 10s');
+        metadata.notifyBucketChange(null);
+        _config.removeListener('browser-access-enabled-change',
+            browserAccessChangeHandler);
+        setTimeout(startWSManagementClient, 10000, url, token);
+        connected = false;
+
+        if (cbOnce) {
+            process.nextTick(cbOnce);
+        }
+    });
+
+    ws.on('error', err => {
+        connected = false;
+        logger.error('error from push server connection', {
+            error: err,
+            errorMessage: err.message,
+        });
+        if (cbOnce) {
+            process.nextTick(cbOnce, err);
+        }
+    });
+
+    ws.on('ping', () => {
+        ws.pong(err => _logError(err, 'failed to send a pong'));
+    });
+
+    ws.on('pong', () => {
+        initiatePing();
+    });
+
+    ws.on('message', data => {
+        const log = logger.newRequestLogger();
+        const message = new ChannelMessageV0(data);
+        switch (message.getType()) {
+        case CONFIG_OVERLAY_MESSAGE:
+            if (!isManagementAgentUsed()) {
+                applyAndSaveOverlay(JSON.parse(message.getPayload()), log);
+            } else {
+                if (overlayMessageListener) {
+                    overlayMessageListener(message.getPayload().toString());
+                }
+            }
+            break;
+        case METRICS_REQUEST_MESSAGE:
+            pushStats();
+            break;
+        case CHANNEL_CLOSE_MESSAGE:
+            closeChannel(message.getChannelNumber());
+            break;
+        case CHANNEL_PAYLOAD_MESSAGE:
+            // browserAccessEnabled defaults to true unless explicitly false
+            if (_config.browserAccessEnabled !== false) {
+                receiveChannelData(
+                    message.getChannelNumber(), message.getPayload());
+            }
+            break;
+        default:
+            logger.error('unknown message type from push server',
+                { messageType: message.getType() });
+        }
+    });
+}
+
+function addOverlayMessageListener(callback) {
+    assert(typeof callback === 'function');
+    overlayMessageListener = callback;
+}
+
+function startPushConnectionHealthCheckServer(cb) {
+    const server = http.createServer((req, res) => {
+        if (req.url !== '/_/healthcheck') {
+            res.writeHead(404);
+            res.write('Not Found');
+        } else if (connected) {
+            res.writeHead(200);
+            res.write('Connected');
+        } else {
+            res.writeHead(503);
+            res.write('Not Connected');
+        }
+        res.end();
+    });
+
+    server.listen(_config.port, cb);
+}
+
+module.exports = {
+    createWSAgent,
+    startWSManagementClient,
+    startPushConnectionHealthCheckServer,
+    addOverlayMessageListener,
+};
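
`createWSAgent` above resolves the proxy purely from environment variables. A small illustration of the precedence (hostnames are examples):

```js
const { createWSAgent } = require('./lib/management/push');
const logger = require('./lib/utilities/logger');

// NO_PROXY wins over everything: no agent is created for excluded hosts.
createWSAgent('wss://push.api.zenko.io/api/v1/instance/i/ws',
    { NO_PROXY: 'push.api.zenko.io' }, logger); // => null

// wss:/https: URLs use HTTPS_PROXY, ws:/http: use HTTP_PROXY,
// and ALL_PROXY is the fallback for both.
createWSAgent('wss://push.api.zenko.io/api/v1/instance/i/ws',
    { HTTPS_PROXY: 'http://proxy.example.com:3128' }, logger); // => HttpsProxyAgent
```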
@@ -245,7 +245,7 @@ function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log,
             // withVersionId cover cases when an object is being restored with a specific version ID.
             // In this case, the storage space was already accounted for when the RestoreObject API call
             // was made, so we don't need to add any inflight, but quota must be evaluated.
-            if (!checkQuota) {
+            if (!checkQuota && !withVersionId) {
                 return next(null, bucket, objMD);
             }
             const contentLength = processBytesToWrite(request.apiMethod, bucket, versionId,
@@ -2,9 +2,9 @@ const MetadataWrapper = require('arsenal').storage.metadata.MetadataWrapper;
 const { config } = require('../Config');
 const logger = require('../utilities/logger');
 const constants = require('../../constants');
+const bucketclient = require('bucketclient');

 const clientName = config.backends.metadata;
-let bucketclient;
 let params;
 if (clientName === 'mem') {
     params = {};
@@ -21,7 +21,6 @@ if (clientName === 'mem') {
         noDbOpen: null,
     };
 } else if (clientName === 'scality') {
-    bucketclient = require('bucketclient');
     params = {
         bucketdBootstrap: config.bucketd.bootstrap,
         bucketdLog: config.bucketd.log,
@@ -18,6 +18,11 @@ const locationStorageCheck =
     require('./api/apiUtils/object/locationStorageCheck');
 const vault = require('./auth/vault');
 const metadata = require('./metadata/wrapper');
+const { initManagement } = require('./management');
+const {
+    initManagementClient,
+    isManagementAgentUsed,
+} = require('./management/agentClient');

 const HttpAgent = require('agentkeepalive');
 const QuotaService = require('./quotas/quotas');
@@ -51,6 +56,7 @@ const STATS_INTERVAL = 5; // 5 seconds
 const STATS_EXPIRY = 30; // 30 seconds
 const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL,
     STATS_EXPIRY);
+const enableRemoteManagement = true;

 class S3Server {
     /**
@@ -321,13 +327,26 @@ class S3Server {
             QuotaService?.setup(log);
         }

+        // TODO this should wait for metadata healthcheck to be ok
+        // TODO only do this in cluster master
+        if (enableRemoteManagement) {
+            if (!isManagementAgentUsed()) {
+                setTimeout(() => {
+                    initManagement(logger.newRequestLogger());
+                }, 5000);
+            } else {
+                initManagementClient();
+            }
+        }
+
         this.started = true;
     });
 }
 }

 function main() {
-    let workers = _config.workers || 1;
+    // TODO: change config to use workers prop. name for clarity
+    let workers = _config.clusters || 1;
     if (process.env.S3BACKEND === 'mem') {
         workers = 1;
     }
@@ -1,12 +1,16 @@
-require('werelogs').stderrUtils.catchAndTimestampStderr();
 const _config = require('../Config').config;
 const { utapiVersion, UtapiServer: utapiServer } = require('utapi');
-const vault = require('../auth/vault');

 // start utapi server
 if (utapiVersion === 1 && _config.utapi) {
     const fullConfig = Object.assign({}, _config.utapi,
-        { redis: _config.redis, vaultclient: vault });
+        { redis: _config.redis });
+    if (_config.vaultd) {
+        Object.assign(fullConfig, { vaultd: _config.vaultd });
+    }
+    if (_config.https) {
+        Object.assign(fullConfig, { https: _config.https });
+    }
     // copy healthcheck IPs
     if (_config.healthChecks) {
         Object.assign(fullConfig, { healthChecks: _config.healthChecks });
|
@ -1,4 +1,3 @@
|
||||||
require('werelogs').stderrUtils.catchAndTimestampStderr();
|
|
||||||
const UtapiReindex = require('utapi').UtapiReindex;
|
const UtapiReindex = require('utapi').UtapiReindex;
|
||||||
const { config } = require('../Config');
|
const { config } = require('../Config');
|
||||||
|
|
||||||
|
|
|
@@ -1,4 +1,3 @@
-require('werelogs').stderrUtils.catchAndTimestampStderr();
 const UtapiReplay = require('utapi').UtapiReplay;
 const _config = require('../Config').config;

@@ -1,11 +1,7 @@
-const { configure, Werelogs } = require('werelogs');
+const { Werelogs } = require('werelogs');

 const _config = require('../Config.js').config;

-configure({
-    level: _config.log.logLevel,
-    dump: _config.log.dumpLevel,
-});
 const werelogs = new Werelogs({
     level: _config.log.logLevel,
     dump: _config.log.dumpLevel,
@@ -1,12 +0,0 @@
-{
-    "STANDARD": {
-        "type": "vitastor",
-        "objectId": "std",
-        "legacyAwsBehavior": true,
-        "details": {
-            "config_path": "/etc/vitastor/vitastor.conf",
-            "pool_id": 3,
-            "metadata_image": "s3-volume-meta"
-        }
-    }
-}
@@ -0,0 +1,179 @@
+const Uuid = require('uuid');
+const WebSocket = require('ws');
+
+const logger = require('./lib/utilities/logger');
+const { initManagement } = require('./lib/management');
+const _config = require('./lib/Config').config;
+const { managementAgentMessageType } = require('./lib/management/agentClient');
+const { addOverlayMessageListener } = require('./lib/management/push');
+const { saveConfigurationVersion } = require('./lib/management/configuration');
+
+// TODO: auth?
+// TODO: werelogs with a specific name.
+
+const CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS = 15000;
+
+class ManagementAgentServer {
+    constructor() {
+        this.port = _config.managementAgent.port || 8010;
+        this.wss = null;
+        this.loadedOverlay = null;
+
+        this.stop = this.stop.bind(this);
+        process.on('SIGINT', this.stop);
+        process.on('SIGHUP', this.stop);
+        process.on('SIGQUIT', this.stop);
+        process.on('SIGTERM', this.stop);
+        process.on('SIGPIPE', () => {});
+    }
+
+    start(_cb) {
+        const cb = _cb || function noop() {};
+
+        /* Define REPORT_TOKEN env variable needed by the management
+         * module. */
+        process.env.REPORT_TOKEN = process.env.REPORT_TOKEN
+                                   || _config.reportToken
+                                   || Uuid.v4();
+
+        initManagement(logger.newRequestLogger(), overlay => {
+            let error = null;
+
+            if (overlay) {
+                this.loadedOverlay = overlay;
+                this.startServer();
+            } else {
+                error = new Error('failed to init management');
+            }
+            return cb(error);
+        });
+    }
+
+    stop() {
+        if (!this.wss) {
+            process.exit(0);
+            return;
+        }
+        this.wss.close(() => {
+            logger.info('server shutdown');
+            process.exit(0);
+        });
+    }
+
+    startServer() {
+        this.wss = new WebSocket.Server({
+            port: this.port,
+            clientTracking: true,
+            path: '/watch',
+        });
+
+        this.wss.on('connection', this.onConnection.bind(this));
+        this.wss.on('listening', this.onListening.bind(this));
+        this.wss.on('error', this.onError.bind(this));
+
+        setInterval(this.checkBrokenConnections.bind(this),
+            CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS);
+
+        addOverlayMessageListener(this.onNewOverlay.bind(this));
+    }
+
+    onConnection(socket, request) {
+        function hearthbeat() {
+            this.isAlive = true;
+        }
+        logger.info('client connected to watch route', {
+            ip: request.connection.remoteAddress,
+        });
+
+        /* eslint-disable no-param-reassign */
+        socket.isAlive = true;
+        socket.on('pong', hearthbeat.bind(socket));
+
+        if (socket.readyState !== socket.OPEN) {
+            logger.error('client socket not in ready state', {
+                state: socket.readyState,
+                client: socket._socket._peername,
+            });
+            return;
+        }
+
+        const msg = {
+            messageType: managementAgentMessageType.NEW_OVERLAY,
+            payload: this.loadedOverlay,
+        };
+        socket.send(JSON.stringify(msg), error => {
+            if (error) {
+                logger.error('failed to send remoteOverlay to client', {
+                    error,
+                    client: socket._socket._peername,
+                });
+            }
+        });
+    }
+
+    onListening() {
+        logger.info('websocket server listening',
+            { port: this.port });
+    }
+
+    onError(error) {
+        logger.error('websocket server error', { error });
+    }
+
+    _sendNewOverlayToClient(client) {
+        if (client.readyState !== client.OPEN) {
+            logger.error('client socket not in ready state', {
+                state: client.readyState,
+                client: client._socket._peername,
+            });
+            return;
+        }
+
+        const msg = {
+            messageType: managementAgentMessageType.NEW_OVERLAY,
+            payload: this.loadedOverlay,
+        };
+        client.send(JSON.stringify(msg), error => {
+            if (error) {
+                logger.error(
+                    'failed to send remoteOverlay to management agent client', {
+                        error, client: client._socket._peername,
+                    });
+            }
+        });
+    }
+
+    onNewOverlay(remoteOverlay) {
+        const remoteOverlayObj = JSON.parse(remoteOverlay);
+        saveConfigurationVersion(
+            this.loadedOverlay, remoteOverlayObj, logger, err => {
+                if (err) {
+                    logger.error('failed to save remote overlay', { err });
+                    return;
+                }
+                this.loadedOverlay = remoteOverlayObj;
+                this.wss.clients.forEach(
+                    this._sendNewOverlayToClient.bind(this)
+                );
+            });
+    }
+
+    checkBrokenConnections() {
+        this.wss.clients.forEach(client => {
+            if (!client.isAlive) {
+                logger.info('close broken connection', {
+                    client: client._socket._peername,
+                });
+                client.terminate();
+                return;
+            }
+            client.isAlive = false;
+            client.ping();
+        });
+    }
+}
+
+const server = new ManagementAgentServer();
+server.start();
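For orientation, the agent added above exposes a single WebSocket route that pushes configuration overlays. A client consuming it could look like the following sketch (host and port are illustrative, matching the 8010 default in the file):

```js
// Minimal sketch of a /watch client; the message shape mirrors the
// { messageType, payload } objects the server sends.
const WebSocket = require('ws');

const ws = new WebSocket('ws://localhost:8010/watch');

ws.on('message', data => {
    const msg = JSON.parse(data);
    console.log('overlay update', msg.messageType, msg.payload);
});
// The ws library answers server pings automatically, which is what keeps
// checkBrokenConnections() from terminating a healthy client.
```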
@@ -193,123 +193,7 @@ tests:
             exp_labels:
               severity: critical

-  # QuotaMetricsNotAvailable (case with bucket quota)
+  # QuotaMetricsNotAvailable
-  ##################################################################################################
-  - name: Quota metrics not available (bucket quota)
-    interval: 1m
-    input_series:
-      - series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
-        values: 1+1x6 0+0x20 1+1x6
-      - series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
-        values: 1+1x32
-    alert_rule_test:
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 6m
-        exp_alerts: []
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 15m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 20m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: critical
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 28m
-        exp_alerts: []
-
-  # QuotaMetricsNotAvailable (case with account quota)
-  ##################################################################################################
-  - name: Quota metrics not available (account quota)
-    interval: 1m
-    input_series:
-      - series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
-        values: 1+1x6 0+0x20 1+1x6
-      - series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
-        values: 1+1x32
-    alert_rule_test:
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 6m
-        exp_alerts: []
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 15m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 20m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: critical
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 28m
-        exp_alerts: []
-
-  # QuotaMetricsNotAvailable (case with both quota quota)
-  ##################################################################################################
-  - name: Quota metrics not available (account quota)
-    interval: 1m
-    input_series:
-      - series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
-        values: 1+1x6 0+0x20 1+1x6
-      - series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
-        values: 1+1x32
-      - series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
-        values: 1+1x32
-    alert_rule_test:
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 6m
-        exp_alerts: []
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 15m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 20m
-        exp_alerts:
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: warning
-          - exp_annotations:
-              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
-              summary: Utilization metrics service not available
-            exp_labels:
-              severity: critical
-      - alertname: QuotaMetricsNotAvailable
-        eval_time: 28m
-        exp_alerts: []
-
-  # QuotaMetricsNotAvailable (case without quota)
   ##################################################################################################
   - name: Utilization service Latency
     interval: 1m
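The `values` fields in these tests use promtool's `initial+stepxcount` series notation. A small sketch that expands it may help read them (hypothetical helper, not part of the repo; it ignores promtool's other notations such as `_` for stale samples):

```js
// Expands promtool's "start+stepxcount" notation:
// '1+1x6' -> [1, 2, 3, 4, 5, 6, 7] (initial value plus 6 increments).
function expandSeries(expr) {
    return expr.split(' ').flatMap(part => {
        const m = part.match(/^(-?\d+(?:\.\d+)?)(?:\+(-?\d+(?:\.\d+)?)x(\d+))?$/);
        if (!m) throw new Error(`unsupported notation: ${part}`);
        const [, start, step = '0', count = '0'] = m;
        return Array.from({ length: Number(count) + 1 },
            (_, i) => Number(start) + Number(step) * i);
    });
}

// '1+1x6 0+0x20 1+1x6': seven rising samples, twenty-one zeros while the
// utilization metric reports unavailable, then seven rising samples again.
```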
@@ -322,10 +206,25 @@
         exp_alerts: []
       - alertname: QuotaMetricsNotAvailable
         eval_time: 15m
-        exp_alerts: []
+        exp_alerts:
+          - exp_annotations:
+              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
+              summary: Utilization metrics service not available
+            exp_labels:
+              severity: warning
       - alertname: QuotaMetricsNotAvailable
         eval_time: 20m
-        exp_alerts: []
+        exp_alerts:
+          - exp_annotations:
+              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
+              summary: Utilization metrics service not available
+            exp_labels:
+              severity: warning
+          - exp_annotations:
+              description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
+              summary: Utilization metrics service not available
+            exp_labels:
+              severity: critical
       - alertname: QuotaMetricsNotAvailable
         eval_time: 28m
         exp_alerts: []
@@ -6,9 +6,6 @@ x-inputs:
   - name: service
     type: constant
     value: artesca-data-connector-s3api-metrics
-  - name: reportJob
-    type: constant
-    value: artesca-data-ops-report-handler
   - name: replicas
     type: constant
   - name: systemErrorsWarningThreshold
@@ -143,10 +140,9 @@ groups:
       # but not available for at least half of the S3 services during the last minute
       - alert: QuotaMetricsNotAvailable
         expr: |
-          avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
-            < ${quotaUnavailabilityThreshold} and
-          (max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
-           max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
+          avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"}[1m]))
+            < ${quotaUnavailabilityThreshold}
+        for: 1m
         labels:
           severity: warning
         annotations:
|
@ -157,10 +153,8 @@ groups:
|
||||||
# but not available during the last 10 minutes
|
# but not available during the last 10 minutes
|
||||||
- alert: QuotaMetricsNotAvailable
|
- alert: QuotaMetricsNotAvailable
|
||||||
expr: |
|
expr: |
|
||||||
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
|
avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"}[1m]))
|
||||||
< ${quotaUnavailabilityThreshold} and
|
< ${quotaUnavailabilityThreshold}
|
||||||
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
|
|
||||||
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
|
|
||||||
for: 10m
|
for: 10m
|
||||||
labels:
|
labels:
|
||||||
severity: critical
|
severity: critical
|
||||||
|
|
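One side gates the alert on quota counts from the report job; the other drops that gate and instead smooths the availability metric with `avg_over_time` plus a `for:` hold, so the alert fires (after a sustained outage) even when no quotas are configured, which is exactly what the test expectation changes above encode. A simplified sketch of the `for:` semantics those tests rely on (real Prometheus evaluates at rule-evaluation resolution, not per minute):

```js
// Returns true once `condition` has held for `forMinutes` consecutive
// one-minute evaluations, mirroring a pending alert becoming firing.
function firesAfter(conditionByMinute, forMinutes) {
    let held = 0;
    for (const cond of conditionByMinute) {
        held = cond ? held + 1 : 0;
        if (held >= forMinutes) return true;
    }
    return false;
}
// The critical rule needs ten solid minutes: firesAfter(gapOf9Minutes, 10) === false.
```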
package.json (50 changed lines)

@@ -1,6 +1,6 @@
 {
     "name": "@zenko/cloudserver",
-    "version": "8.8.27",
+    "version": "8.8.24",
     "description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol",
     "main": "index.js",
     "engines": {
@@ -21,13 +21,14 @@
     "dependencies": {
         "@azure/storage-blob": "^12.12.0",
         "@hapi/joi": "^17.1.0",
-        "arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
-        "async": "^2.5.0",
-        "aws-sdk": "^2.905.0",
+        "arsenal": "git+https://github.com/scality/arsenal#8.1.130",
+        "async": "~2.5.0",
+        "aws-sdk": "2.905.0",
+        "bucketclient": "scality/bucketclient#8.1.9",
         "bufferutil": "^4.0.6",
         "commander": "^2.9.0",
         "cron-parser": "^2.11.0",
-        "diskusage": "^1.1.3",
+        "diskusage": "1.1.3",
         "google-auto-auth": "^0.9.1",
         "http-proxy": "^1.17.0",
         "http-proxy-agent": "^4.0.1",
@@ -37,45 +38,38 @@
         "mongodb": "^5.2.0",
         "node-fetch": "^2.6.0",
         "node-forge": "^0.7.1",
-        "npm-run-all": "^4.1.5",
+        "npm-run-all": "~4.1.5",
         "prom-client": "14.2.0",
         "request": "^2.81.0",
-        "scubaclient": "git+https://git.yourcmc.ru/vitalif/zenko-scubaclient.git",
-        "sql-where-parser": "^2.2.1",
-        "utapi": "git+https://git.yourcmc.ru/vitalif/zenko-utapi.git",
+        "scubaclient": "git+https://github.com/scality/scubaclient.git",
+        "sql-where-parser": "~2.2.1",
+        "utapi": "github:scality/utapi#8.1.13",
         "utf-8-validate": "^5.0.8",
-        "utf8": "^2.1.1",
+        "utf8": "~2.1.1",
         "uuid": "^8.3.2",
-        "werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
+        "vaultclient": "scality/vaultclient#8.3.20",
+        "werelogs": "scality/werelogs#8.1.4",
         "ws": "^5.1.0",
-        "xml2js": "^0.4.16"
-    },
-    "overrides": {
-        "ltgt": "^2.2.0"
+        "xml2js": "~0.4.16"
     },
     "devDependencies": {
-        "@babel/core": "^7.25.2",
-        "@babel/preset-env": "^7.25.3",
-        "babel-loader": "^9.1.3",
         "bluebird": "^3.3.1",
         "eslint": "^8.14.0",
-        "eslint-config-airbnb-base": "^15.0.0",
-        "eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
+        "eslint-config-airbnb-base": "^13.1.0",
+        "eslint-config-scality": "scality/Guidelines#8.2.0",
        "eslint-plugin-import": "^2.14.0",
         "eslint-plugin-mocha": "^10.1.0",
         "express": "^4.17.1",
-        "ioredis": "^4.9.5",
-        "istanbul": "^1.0.0-alpha.2",
-        "istanbul-api": "^1.0.0-alpha.13",
+        "ioredis": "4.9.5",
+        "istanbul": "1.0.0-alpha.2",
+        "istanbul-api": "1.0.0-alpha.13",
         "lolex": "^1.4.0",
-        "mocha": ">=3.1.2",
+        "mocha": "^2.3.4",
         "mocha-junit-reporter": "^1.23.1",
         "mocha-multi-reporters": "^1.1.7",
-        "node-mocks-http": "^1.5.2",
+        "node-mocks-http": "1.5.2",
         "sinon": "^13.0.1",
-        "tv4": "^1.2.7",
-        "webpack": "^5.93.0",
-        "webpack-cli": "^5.1.4"
+        "tv4": "^1.2.7"
     },
     "scripts": {
         "cloudserver": "S3METADATA=mongodb npm-run-all --parallel start_dataserver start_s3server",
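Among the fields dropped here, `overrides` is npm's mechanism (npm 8.3+) for pinning transitive dependencies; the removed block forced every nested `ltgt` to ^2.2.0. One way to check what actually resolved, run from the repo root after an install:

```js
// Prints the ltgt version npm resolved; with the override in place this
// should be a 2.2.x release.
console.log(require('ltgt/package.json').version);
```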
@@ -15,15 +15,13 @@ const sourceObject = 'objectsource';
 const sourceVersionId = 'vid1';

 describe('prepareRequestContexts', () => {
-    it('should return s3:DeleteObject if multiObjectDelete method', () => {
+    it('should return null if multiObjectDelete method', () => {
         const apiMethod = 'multiObjectDelete';
         const request = makeRequest();
         const results = prepareRequestContexts(apiMethod, request, sourceBucket,
             sourceObject, sourceVersionId);

-        assert.strictEqual(results.length, 1);
-        const expectedAction = 's3:DeleteObject';
-        assert.strictEqual(results[0].getAction(), expectedAction);
+        assert.strictEqual(results, null);
     });

     it('should return s3:PutObjectVersion request context action for objectPut method with x-scal-s3-version-id' +
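The expectation flip reflects a real behavioral difference between the two branches: on one side `prepareRequestContexts` returns `null` for `multiObjectDelete`, i.e. no up-front request context is built and authorization happens per key inside the API handler instead. A hedged illustration of what callers see under that behavior:

```js
// Sketch: callers must handle the null case for multiObjectDelete.
const contexts = prepareRequestContexts('multiObjectDelete', request,
    sourceBucket, sourceObject, sourceVersionId);
if (contexts === null) {
    // authorization deferred: each object key is checked individually later
} else {
    contexts.forEach(ctx => console.log(ctx.getAction()));
}
```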
@@ -1,4 +1,3 @@
-const crypto = require('crypto');
 const assert = require('assert');
 const { errors, storage } = require('arsenal');

@@ -8,7 +7,6 @@ const multiObjectDelete = require('../../../lib/api/multiObjectDelete');
 const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers');
 const DummyRequest = require('../DummyRequest');
 const { bucketPut } = require('../../../lib/api/bucketPut');
-const metadataWrapper = require('../../../lib/metadata/wrapper');
 const objectPut = require('../../../lib/api/objectPut');
 const log = new DummyRequestLogger();

@@ -27,7 +25,6 @@ const objectKey1 = 'objectName1';
 const objectKey2 = 'objectName2';
 const metadataUtils = require('../../../lib/metadata/metadataUtils');
 const services = require('../../../lib/services');
-const { BucketInfo } = require('arsenal/build/lib/models');
 const testBucketPutRequest = new DummyRequest({
     bucketName,
     namespace,

@@ -360,43 +357,3 @@ describe('decodeObjectVersion function helper', () => {
         assert.deepStrictEqual(ret[1], undefined);
     });
 });
-
-describe('multiObjectDelete function', () => {
-    afterEach(() => {
-        sinon.restore();
-    });
-
-    it('should not authorize the bucket and initial IAM authorization results', done => {
-        const post = '<Delete><Object><Key>objectname</Key></Object></Delete>';
-        const request = new DummyRequest({
-            bucketName: 'bucketname',
-            objectKey: 'objectname',
-            parsedHost: 'localhost',
-            headers: {
-                'content-md5': crypto.createHash('md5').update(post, 'utf8').digest('base64')
-            },
-            post,
-            url: '/bucketname',
-        });
-        const authInfo = makeAuthInfo('123456');
-
-        sinon.stub(metadataWrapper, 'getBucket').callsFake((bucketName, log, cb) =>
-            cb(null, new BucketInfo(
-                'bucketname',
-                '123456',
-                'accountA',
-                new Date().toISOString(),
-                15,
-            )));
-
-        multiObjectDelete.multiObjectDelete(authInfo, request, log, (err, res) => {
-            // Expected result is an access denied on the object, and no error, as the API was authorized
-            assert.strictEqual(err, null);
-            assert.strictEqual(
-                res.includes('<Error><Key>objectname</Key><Code>AccessDenied</Code>'),
-                true
-            );
-            done();
-        });
-    });
-});
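The removed test exercised exactly that per-key authorization path, and the dropped `crypto` import existed to compute the `Content-MD5` header S3 requires on `DeleteObjects` request bodies. That computation, standing alone:

```js
const crypto = require('crypto');

const post = '<Delete><Object><Key>objectname</Key></Object></Delete>';
// S3 requires Content-MD5 (base64 of the body's MD5 digest) on multi-object deletes.
const contentMd5 = crypto.createHash('md5').update(post, 'utf8').digest('base64');
console.log(contentMd5);
```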
@@ -0,0 +1,82 @@
+const assert = require('assert');
+
+const {
+    createWSAgent,
+} = require('../../../lib/management/push');
+
+const proxy = 'http://proxy:3128/';
+const logger = { info: () => {} };
+
+function testVariableSet(httpProxy, httpsProxy, allProxy, noProxy) {
+    return () => {
+        it(`should use ${httpProxy} environment variable`, () => {
+            let agent = createWSAgent('https://pushserver', {
+                [httpProxy]: 'http://proxy:3128',
+            }, logger);
+            assert.equal(agent, null);
+
+            agent = createWSAgent('http://pushserver', {
+                [httpProxy]: proxy,
+            }, logger);
+            assert.equal(agent.proxy.href, proxy);
+        });
+
+        it(`should use ${httpsProxy} environment variable`, () => {
+            let agent = createWSAgent('http://pushserver', {
+                [httpsProxy]: proxy,
+            }, logger);
+            assert.equal(agent, null);
+
+            agent = createWSAgent('https://pushserver', {
+                [httpsProxy]: proxy,
+            }, logger);
+            assert.equal(agent.proxy.href, proxy);
+        });
+
+        it(`should use ${allProxy} environment variable`, () => {
+            let agent = createWSAgent('http://pushserver', {
+                [allProxy]: proxy,
+            }, logger);
+            assert.equal(agent.proxy.href, proxy);
+
+            agent = createWSAgent('https://pushserver', {
+                [allProxy]: proxy,
+            }, logger);
+            assert.equal(agent.proxy.href, proxy);
+        });
+
+        it(`should use ${noProxy} environment variable`, () => {
+            let agent = createWSAgent('http://pushserver', {
+                [noProxy]: 'pushserver',
+            }, logger);
+            assert.equal(agent, null);
+
+            agent = createWSAgent('http://pushserver', {
+                [noProxy]: 'pushserver',
+                [httpProxy]: proxy,
+            }, logger);
+            assert.equal(agent, null);
+
+            agent = createWSAgent('http://pushserver', {
+                [noProxy]: 'pushserver2',
+                [httpProxy]: proxy,
+            }, logger);
+            assert.equal(agent.proxy.href, proxy);
+        });
+    };
+}
+
+describe('Websocket connection agent', () => {
+    describe('with no proxy env', () => {
+        it('should handle empty proxy environment', () => {
+            const agent = createWSAgent('https://pushserver', {}, logger);
+            assert.equal(agent, null);
+        });
+    });
+
+    describe('with lowercase proxy env',
+        testVariableSet('http_proxy', 'https_proxy', 'all_proxy', 'no_proxy'));
+
+    describe('with uppercase proxy env',
+        testVariableSet('HTTP_PROXY', 'HTTPS_PROXY', 'ALL_PROXY', 'NO_PROXY'));
+});
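In short, these tests pin down the proxy-selection rules: an agent is created only when the proxy variable matches the target URL's scheme (or `ALL_PROXY` is set), and `no_proxy`/`NO_PROXY` wins over everything. A usage sketch (the endpoint is illustrative):

```js
// null  -> connect directly (no matching proxy, or host listed in no_proxy)
// agent -> pass as `new WebSocket(url, { agent })` to tunnel via the proxy
const agent = createWSAgent('http://pushserver', process.env, logger);
```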
@@ -0,0 +1,239 @@
+const assert = require('assert');
+const crypto = require('crypto');
+
+const { DummyRequestLogger } = require('../helpers');
+const log = new DummyRequestLogger();
+
+const metadata = require('../../../lib/metadata/wrapper');
+const managementDatabaseName = 'PENSIEVE';
+const tokenConfigurationKey = 'auth/zenko/remote-management-token';
+
+const { privateKey, accessKey, decryptedSecretKey, secretKey, canonicalId,
+    userName } = require('./resources.json');
+const shortid = '123456789012';
+const email = 'customaccount1@setbyenv.com';
+const arn = 'arn:aws:iam::123456789012:root';
+const { config } = require('../../../lib/Config');
+
+const {
+    remoteOverlayIsNewer,
+    patchConfiguration,
+} = require('../../../lib/management/configuration');
+
+const {
+    initManagementDatabase,
+} = require('../../../lib/management/index');
+
+function initManagementCredentialsMock(cb) {
+    return metadata.putObjectMD(managementDatabaseName,
+        tokenConfigurationKey, { privateKey }, {},
+        log, error => cb(error));
+}
+
+function getConfig() {
+    return config;
+}
+
+// Original Config
+const overlayVersionOriginal = Object.assign({}, config.overlayVersion);
+const authDataOriginal = Object.assign({}, config.authData);
+const locationConstraintsOriginal = Object.assign({},
+    config.locationConstraints);
+const restEndpointsOriginal = Object.assign({}, config.restEndpoints);
+const browserAccessEnabledOriginal = config.browserAccessEnabled;
+const instanceId = '19683e55-56f7-4a4c-98a7-706c07e4ec30';
+const publicInstanceId = crypto.createHash('sha256')
+    .update(instanceId)
+    .digest('hex');
+
+function resetConfig() {
+    config.overlayVersion = overlayVersionOriginal;
+    config.authData = authDataOriginal;
+    config.locationConstraints = locationConstraintsOriginal;
+    config.restEndpoints = restEndpointsOriginal;
+    config.browserAccessEnabled = browserAccessEnabledOriginal;
+}
+
+function assertConfig(actualConf, expectedConf) {
+    Object.keys(expectedConf).forEach(key => {
+        assert.deepStrictEqual(actualConf[key], expectedConf[key]);
+    });
+}
+
+describe('patchConfiguration', () => {
+    before(done => initManagementDatabase(log, err => {
+        if (err) {
+            return done(err);
+        }
+        return initManagementCredentialsMock(done);
+    }));
+    beforeEach(() => {
+        resetConfig();
+    });
+
+    it('should modify config using the new config', done => {
+        const newConf = {
+            version: 1,
+            instanceId,
+            users: [
+                {
+                    secretKey,
+                    accessKey,
+                    canonicalId,
+                    userName,
+                },
+            ],
+            endpoints: [
+                {
+                    hostname: '1.1.1.1',
+                    locationName: 'us-east-1',
+                },
+            ],
+            locations: {
+                'us-east-1': {
+                    name: 'us-east-1',
+                    objectId: 'us-east-1',
+                    locationType: 'location-file-v1',
+                    legacyAwsBehavior: true,
+                    details: {},
+                },
+            },
+            browserAccess: {
+                enabled: true,
+            },
+        };
+        return patchConfiguration(newConf, log, err => {
+            assert.ifError(err);
+            const actualConf = getConfig();
+            const expectedConf = {
+                overlayVersion: 1,
+                publicInstanceId,
+                browserAccessEnabled: true,
+                authData: {
+                    accounts: [{
+                        name: userName,
+                        email,
+                        arn,
+                        canonicalID: canonicalId,
+                        shortid,
+                        keys: [{
+                            access: accessKey,
+                            secret: decryptedSecretKey,
+                        }],
+                    }],
+                },
+                locationConstraints: {
+                    'us-east-1': {
+                        type: 'file',
+                        objectId: 'us-east-1',
+                        legacyAwsBehavior: true,
+                        isTransient: false,
+                        sizeLimitGB: null,
+                        details: { supportsVersioning: true },
+                        name: 'us-east-1',
+                        locationType: 'location-file-v1',
+                    },
+                },
+            };
+            assertConfig(actualConf, expectedConf);
+            assert.deepStrictEqual(actualConf.restEndpoints['1.1.1.1'],
+                'us-east-1');
+            return done();
+        });
+    });
+
+    it('should apply second configuration if version (2) is greater than ' +
+    'overlayVersion (1)', done => {
+        const newConf1 = {
+            version: 1,
+            instanceId,
+        };
+        const newConf2 = {
+            version: 2,
+            instanceId,
+            browserAccess: {
+                enabled: true,
+            },
+        };
+        patchConfiguration(newConf1, log, err => {
+            assert.ifError(err);
+            return patchConfiguration(newConf2, log, err => {
+                assert.ifError(err);
+                const actualConf = getConfig();
+                const expectedConf = {
+                    overlayVersion: 2,
+                    browserAccessEnabled: true,
+                };
+                assertConfig(actualConf, expectedConf);
+                return done();
+            });
+        });
+    });
+
+    it('should not apply the second configuration if version equals ' +
+    'overlayVersion', done => {
+        const newConf1 = {
+            version: 1,
+            instanceId,
+        };
+        const newConf2 = {
+            version: 1,
+            instanceId,
+            browserAccess: {
+                enabled: true,
+            },
+        };
+        patchConfiguration(newConf1, log, err => {
+            assert.ifError(err);
+            return patchConfiguration(newConf2, log, err => {
+                assert.ifError(err);
+                const actualConf = getConfig();
+                const expectedConf = {
+                    overlayVersion: 1,
+                    browserAccessEnabled: undefined,
+                };
+                assertConfig(actualConf, expectedConf);
+                return done();
+            });
+        });
+    });
+});
+
+describe('remoteOverlayIsNewer', () => {
+    it('should return remoteOverlayIsNewer equals false if remote overlay ' +
+    'is less than the cached', () => {
+        const cachedOverlay = {
+            version: 2,
+        };
+        const remoteOverlay = {
+            version: 1,
+        };
+        const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
+            remoteOverlay);
+        assert.equal(isRemoteOverlayNewer, false);
+    });
+    it('should return remoteOverlayIsNewer equals false if remote overlay ' +
+    'and the cached one are equal', () => {
+        const cachedOverlay = {
+            version: 1,
+        };
+        const remoteOverlay = {
+            version: 1,
+        };
+        const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
+            remoteOverlay);
+        assert.equal(isRemoteOverlayNewer, false);
+    });
+    it('should return remoteOverlayIsNewer equals true if remote overlay ' +
+    'version is greater than the cached one ', () => {
+        const cachedOverlay = {
+            version: 0,
+        };
+        const remoteOverlay = {
+            version: 1,
+        };
+        const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
+            remoteOverlay);
+        assert.equal(isRemoteOverlayNewer, true);
+    });
+});
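The three `remoteOverlayIsNewer` cases encode a simple strictly-greater version rule; restated as a sketch (the real implementation may differ in edge-case handling for missing versions):

```js
// Apply a remote overlay only when its version strictly increases.
function remoteOverlayIsNewerSketch(cachedOverlay, remoteOverlay) {
    return remoteOverlay.version > cachedOverlay.version;
}
```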
@@ -0,0 +1,9 @@
+{
+    "privateKey": "-----BEGIN RSA PRIVATE KEY-----\r\nMIIEowIBAAKCAQEAj13sSYE40lAX2qpBvfdGfcSVNtBf8i5FH+E8FAhORwwPu+2S\r\n3yBQbgwHq30WWxunGb1NmZL1wkVZ+vf12DtxqFRnMA08LfO4oO6oC4V8XfKeuHyJ\r\n1qlaKRINz6r9yDkTHtwWoBnlAINurlcNKgGD5p7D+G26Chbr/Oo0ZwHula9DxXy6\r\neH8/bJ5/BynyNyyWRPoAO+UkUdY5utkFCUq2dbBIhovMgjjikf5p2oWqnRKXc+JK\r\nBegr6lSHkkhyqNhTmd8+wA+8Cace4sy1ajY1t5V4wfRZea5vwl/HlyyKodvHdxng\r\nJgg6H61JMYPkplY6Gr9OryBKEAgq02zYoYTDfwIDAQABAoIBAAuDYGlavkRteCzw\r\nRU1LIVcSRWVcgIgDXTu9K8T0Ec0008Kkxomyn6LmxmroJbZ1VwsDH8s4eRH73ckA\r\nxrZxt6Pr+0lplq6eBvKtl8MtGhq1VDe+kJczjHEF6SQHOFAu/TEaPZrn2XMcGvRX\r\nO1BnRL9tepFlxm3u/06VRFYNWqqchM+tFyzLu2AuiuKd5+slSX7KZvVgdkY1ErKH\r\ngB75lPyhPb77C/6ptqUisVMSO4JhLhsD0+ekDVY982Sb7KkI+szdWSbtMx9Ek2Wo\r\ntXwJz7I8T7IbODy9aW9G+ydyhMDFmaEYIaDVFKJj5+fluNza3oQ5PtFNVE50GQJA\r\nsisGqfECgYEAwpkwt0KpSamSEH6qknNYPOwxgEuXWoFVzibko7is2tFPvY+YJowb\r\n68MqHIYhf7gHLq2dc5Jg1TTbGqLECjVxp4xLU4c95KBy1J9CPAcuH4xQLDXmeLzP\r\nJ2YgznRocbzAMCDAwafCr3uY9FM7oGDHAi5bE5W11xWx+9MlFExL3JkCgYEAvJp5\r\nf+JGN1W037bQe2QLYUWGszewZsvplnNOeytGQa57w4YdF42lPhMz6Kc/zdzKZpN9\r\njrshiIDhAD5NCno6dwqafBAW9WZl0sn7EnlLhD4Lwm8E9bRHnC9H82yFuqmNrzww\r\nzxBCQogJISwHiVz4EkU48B283ecBn0wT/fAa19cCgYEApKWsnEHgrhy1IxOpCoRh\r\nUhqdv2k1xDPN/8DUjtnAFtwmVcLa/zJopU/Zn4y1ZzSzjwECSTi+iWZRQ/YXXHPf\r\nl92SFjhFW92Niuy8w8FnevXjF6T7PYiy1SkJ9OR1QlZrXc04iiGBDazLu115A7ce\r\nanACS03OLw+CKgl6Q/RR83ECgYBCUngDVoimkMcIHHt3yJiP3ikeAKlRnMdJlsa0\r\nXWVZV4hCG3lDfRXsnEgWuimftNKf+6GdfYSvQdLdiQsCcjT5A4uLsQTByv5nf4uA\r\n1ZKOsFrmRrARzxGXhLDikvj7yP//7USkq+0BBGFhfuAvl7fMhPceyPZPehqB7/jf\r\nxX1LBQKBgAn5GgSXzzS0e06ZlP/VrKxreOHa5Z8wOmqqYQ0QTeczAbNNmuITdwwB\r\nNkbRqpVXRIfuj0BQBegAiix8om1W4it0cwz54IXBwQULxJR1StWxj3jo4QtpMQ+z\r\npVPdB1Ilb9zPV1YvDwRfdS1xsobzznAx56ecsXduZjs9mF61db8Q\r\n-----END RSA PRIVATE KEY-----\r\n",
+    "publicKey": "-----BEGIN PUBLIC KEY-----\r\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAj13sSYE40lAX2qpBvfdG\r\nfcSVNtBf8i5FH+E8FAhORwwPu+2S3yBQbgwHq30WWxunGb1NmZL1wkVZ+vf12Dtx\r\nqFRnMA08LfO4oO6oC4V8XfKeuHyJ1qlaKRINz6r9yDkTHtwWoBnlAINurlcNKgGD\r\n5p7D+G26Chbr/Oo0ZwHula9DxXy6eH8/bJ5/BynyNyyWRPoAO+UkUdY5utkFCUq2\r\ndbBIhovMgjjikf5p2oWqnRKXc+JKBegr6lSHkkhyqNhTmd8+wA+8Cace4sy1ajY1\r\nt5V4wfRZea5vwl/HlyyKodvHdxngJgg6H61JMYPkplY6Gr9OryBKEAgq02zYoYTD\r\nfwIDAQAB\r\n-----END PUBLIC KEY-----\r\n",
+    "accessKey": "QXP3VDG3SALNBX2QBJ1C",
+    "secretKey": "K5FyqZo5uFKfw9QBtn95o6vuPuD0zH/1seIrqPKqGnz8AxALNSx6EeRq7G1I6JJpS1XN13EhnwGn2ipsml3Uf2fQ00YgEmImG8wzGVZm8fWotpVO4ilN4JGyQCah81rNX4wZ9xHqDD7qYR5MyIERxR/osoXfctOwY7GGUjRKJfLOguNUlpaovejg6mZfTvYAiDF+PTO1sKUYqHt1IfKQtsK3dov1EFMBB5pWM7sVfncq/CthKN5M+VHx9Y87qdoP3+7AW+RCBbSDOfQgxvqtS7PIAf10mDl8k2kEURLz+RqChu4O4S0UzbEmtja7wa7WYhYKv/tM/QeW7kyNJMmnPg==",
+    "decryptedSecretKey": "n7PSZ3U6SgerF9PCNhXYsq3S3fRKVGdZTicGV8Ur",
+    "canonicalId": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be",
+    "userName": "orbituser"
+}
@@ -0,0 +1,43 @@
+const assert = require('assert');
+
+const { getCapabilities } = require('../../../lib/utilities/reportHandler');
+
+// Ensures that expected features are enabled even if they
+// rely on optional dependencies (such as secureChannelOptimizedPath)
+describe('report handler', () => {
+    it('should report current capabilities', () => {
+        const c = getCapabilities();
+        assert.strictEqual(c.locationTypeDigitalOcean, true);
+        assert.strictEqual(c.locationTypeS3Custom, true);
+        assert.strictEqual(c.locationTypeSproxyd, true);
+        assert.strictEqual(c.locationTypeHyperdriveV2, true);
+        assert.strictEqual(c.locationTypeLocal, true);
+        assert.strictEqual(c.preferredReadLocation, true);
+        assert.strictEqual(c.managedLifecycle, true);
+        assert.strictEqual(c.secureChannelOptimizedPath, true);
+        assert.strictEqual(c.s3cIngestLocation, true);
+    });
+
+    [
+        { value: 'true', result: true },
+        { value: 'TRUE', result: true },
+        { value: 'tRuE', result: true },
+        { value: '1', result: true },
+        { value: 'false', result: false },
+        { value: 'FALSE', result: false },
+        { value: 'FaLsE', result: false },
+        { value: '0', result: false },
+        { value: 'foo', result: false },
+        { value: '', result: true },
+        { value: undefined, result: true },
+    ].forEach(param =>
+        it(`should allow set local file system capability ${param.value}`, () => {
+            const OLD_ENV = process.env;
+
+            if (param.value !== undefined) process.env.LOCAL_VOLUME_CAPABILITY = param.value;
+            assert.strictEqual(getCapabilities().locationTypeLocal, param.result);
+
+            process.env = OLD_ENV;
+        })
+    );
+});
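The table of `LOCAL_VOLUME_CAPABILITY` cases implies an env-flag convention: unset or empty means enabled by default, and otherwise only a case-insensitive `true` or `1` enables the capability. As a sketch (hypothetical helper name; the real logic lives in reportHandler):

```js
function boolEnv(value) {
    if (value === undefined || value === '') return true; // default: enabled
    return value.toLowerCase() === 'true' || value === '1';
}
```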
@@ -0,0 +1,72 @@
+const assert = require('assert');
+
+const {
+    ChannelMessageV0,
+    MessageType,
+    TargetType,
+} = require('../../../lib/management/ChannelMessageV0');
+
+const {
+    CONFIG_OVERLAY_MESSAGE,
+    METRICS_REQUEST_MESSAGE,
+    METRICS_REPORT_MESSAGE,
+    CHANNEL_CLOSE_MESSAGE,
+    CHANNEL_PAYLOAD_MESSAGE,
+} = MessageType;
+
+const { TARGET_ANY } = TargetType;
+
+describe('ChannelMessageV0', () => {
+    describe('codec', () => {
+        it('should roundtrip metrics report', () => {
+            const b = ChannelMessageV0.encodeMetricsReportMessage({ a: 1 });
+            const m = new ChannelMessageV0(b);
+
+            assert.strictEqual(METRICS_REPORT_MESSAGE, m.getType());
+            assert.strictEqual(0, m.getChannelNumber());
+            assert.strictEqual(m.getTarget(), TARGET_ANY);
+            assert.strictEqual(m.getPayload().toString(), '{"a":1}');
+        });
+
+        it('should roundtrip channel data', () => {
+            const data = new Buffer('dummydata');
+            const b = ChannelMessageV0.encodeChannelDataMessage(50, data);
+            const m = new ChannelMessageV0(b);
+
+            assert.strictEqual(CHANNEL_PAYLOAD_MESSAGE, m.getType());
+            assert.strictEqual(50, m.getChannelNumber());
+            assert.strictEqual(m.getTarget(), TARGET_ANY);
+            assert.strictEqual(m.getPayload().toString(), 'dummydata');
+        });
+
+        it('should roundtrip channel close', () => {
+            const b = ChannelMessageV0.encodeChannelCloseMessage(3);
+            const m = new ChannelMessageV0(b);
+
+            assert.strictEqual(CHANNEL_CLOSE_MESSAGE, m.getType());
+            assert.strictEqual(3, m.getChannelNumber());
+            assert.strictEqual(m.getTarget(), TARGET_ANY);
+        });
+    });
+
+    describe('decoder', () => {
+        it('should parse metrics request', () => {
+            const b = new Buffer([METRICS_REQUEST_MESSAGE, 0, 0]);
+            const m = new ChannelMessageV0(b);
+
+            assert.strictEqual(METRICS_REQUEST_MESSAGE, m.getType());
+            assert.strictEqual(0, m.getChannelNumber());
+            assert.strictEqual(m.getTarget(), TARGET_ANY);
+        });
+
+        it('should parse overlay push', () => {
+            const b = new Buffer([CONFIG_OVERLAY_MESSAGE, 0, 0, 34, 65, 34]);
+            const m = new ChannelMessageV0(b);
+
+            assert.strictEqual(CONFIG_OVERLAY_MESSAGE, m.getType());
+            assert.strictEqual(0, m.getChannelNumber());
+            assert.strictEqual(m.getTarget(), TARGET_ANY);
+            assert.strictEqual(m.getPayload().toString(), '"A"');
+        });
+    });
+});
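The decoder cases reveal the wire format the codec relies on: byte 0 is the message type, byte 1 the channel number, byte 2 the target, and any remaining bytes are the payload. Building a frame by hand, the way the tests do (assuming the constants destructured above; `Buffer.from` is the non-deprecated spelling of the `new Buffer` calls in the tests):

```js
// [type, channel, target, ...payload] — bytes 34,65,34 spell '"A"'.
const frame = Buffer.concat([
    Buffer.from([CONFIG_OVERLAY_MESSAGE, 0, 0]),
    Buffer.from('"A"'),
]);
// new ChannelMessageV0(frame).getPayload().toString() === '"A"'
```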
@@ -1,254 +0,0 @@
-const assert = require('assert');
-const { parseRedisConfig } = require('../../../lib/Config');
-
-describe('parseRedisConfig', () => {
-    [
-        {
-            desc: 'with host and port',
-            input: {
-                host: 'localhost',
-                port: 6479,
-            },
-        },
-        {
-            desc: 'with host, port and password',
-            input: {
-                host: 'localhost',
-                port: 6479,
-                password: 'mypass',
-            },
-        },
-        {
-            desc: 'with host, port and an empty password',
-            input: {
-                host: 'localhost',
-                port: 6479,
-                password: '',
-            },
-        },
-        {
-            desc: 'with host, port and an empty retry config',
-            input: {
-                host: 'localhost',
-                port: 6479,
-                retry: {
-                },
-            },
-        },
-        {
-            desc: 'with host, port and a custom retry config',
-            input: {
-                host: 'localhost',
-                port: 6479,
-                retry: {
-                    connectBackoff: {
-                        min: 10,
-                        max: 1000,
-                        jitter: 0.1,
-                        factor: 1.5,
-                        deadline: 10000,
-                    },
-                },
-            },
-        },
-        {
-            desc: 'with a single sentinel and no sentinel password',
-            input: {
-                name: 'myname',
-                sentinels: [
-                    {
-                        host: 'localhost',
-                        port: 16479,
-                    },
-                ],
-            },
-        },
-        {
-            desc: 'with two sentinels and a sentinel password',
-            input: {
-                name: 'myname',
-                sentinels: [
-                    {
-                        host: '10.20.30.40',
-                        port: 16479,
-                    },
-                    {
-                        host: '10.20.30.41',
-                        port: 16479,
-                    },
-                ],
-                sentinelPassword: 'mypass',
-            },
-        },
-        {
-            desc: 'with a sentinel and an empty sentinel password',
-            input: {
-                name: 'myname',
-                sentinels: [
-                    {
-                        host: '10.20.30.40',
-                        port: 16479,
-                    },
-                ],
-                sentinelPassword: '',
-            },
-        },
-        {
-            desc: 'with a basic production-like config with sentinels',
-            input: {
-                name: 'scality-s3',
-                password: '',
-                sentinelPassword: '',
-                sentinels: [
-                    {
-                        host: 'storage-1',
-                        port: 16379,
-                    },
-                    {
-                        host: 'storage-2',
-                        port: 16379,
-                    },
-                    {
-                        host: 'storage-3',
-                        port: 16379,
-                    },
-                    {
-                        host: 'storage-4',
-                        port: 16379,
-                    },
-                    {
-                        host: 'storage-5',
-                        port: 16379,
-                    },
-                ],
-            },
-        },
-        {
-            desc: 'with a single sentinel passed as a string',
-            input: {
-                name: 'myname',
-                sentinels: '10.20.30.40:16479',
-            },
-            output: {
-                name: 'myname',
-                sentinels: [
-                    {
-                        host: '10.20.30.40',
-                        port: 16479,
-                    },
-                ],
-            },
-        },
-        {
-            desc: 'with a list of sentinels passed as a string',
-            input: {
-                name: 'myname',
-                sentinels: '10.20.30.40:16479,another-host:16480,10.20.30.42:16481',
-                sentinelPassword: 'mypass',
-            },
-            output: {
-                name: 'myname',
-                sentinels: [
-                    {
-                        host: '10.20.30.40',
-                        port: 16479,
-                    },
-                    {
-                        host: 'another-host',
-                        port: 16480,
-                    },
-                    {
-                        host: '10.20.30.42',
-                        port: 16481,
-                    },
-                ],
-                sentinelPassword: 'mypass',
-            },
-        },
-    ].forEach(testCase => {
-        it(`should parse a valid config ${testCase.desc}`, () => {
-            const redisConfig = parseRedisConfig(testCase.input);
-            assert.deepStrictEqual(redisConfig, testCase.output || testCase.input);
-        });
-    });
-
-    [
-        {
-            desc: 'that is empty',
-            input: {},
-        },
-        {
-            desc: 'with only a host',
-            input: {
-                host: 'localhost',
-            },
-        },
-        {
-            desc: 'with only a port',
-            input: {
-                port: 6479,
-            },
-        },
-        {
-            desc: 'with a custom retry config with missing values',
-            input: {
-                host: 'localhost',
-                port: 6479,
-                retry: {
-                    connectBackoff: {
-                    },
-                },
-            },
-        },
-        {
-            desc: 'with a sentinel but no name',
-            input: {
-                sentinels: [
-                    {
-                        host: 'localhost',
-                        port: 16479,
-                    },
-                ],
-            },
-        },
-        {
-            desc: 'with a sentinel but an empty name',
-            input: {
-                name: '',
-                sentinels: [
-                    {
-                        host: 'localhost',
-                        port: 16479,
-                    },
-                ],
-            },
-        },
-        {
-            desc: 'with an empty list of sentinels',
-            input: {
-                name: 'myname',
-                sentinels: [],
-            },
-        },
-        {
-            desc: 'with an empty list of sentinels passed as a string',
-            input: {
-                name: 'myname',
-                sentinels: '',
-            },
-        },
-        {
-            desc: 'with an invalid list of sentinels passed as a string (missing port)',
-            input: {
-                name: 'myname',
-                sentinels: '10.20.30.40:16479,10.20.30.50',
-            },
-        },
-    ].forEach(testCase => {
-        it(`should fail to parse an invalid config ${testCase.desc}`, () => {
-            assert.throws(() => {
-                parseRedisConfig(testCase.input);
-            });
-        });
-    });
-});
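Among other things, the removed suite documents that `sentinels` may be given as a `host:port,host:port` string. A sketch of that parsing rule (hypothetical helper; the real parsing lives in `parseRedisConfig` in lib/Config):

```js
function parseSentinelsSketch(str) {
    return str.split(',').map(entry => {
        const [host, port] = entry.split(':');
        if (!host || !port) {
            throw new Error(`invalid sentinel "${entry}": missing port`);
        }
        return { host, port: Number(port) };
    });
}

// parseSentinelsSketch('10.20.30.40:16479,another-host:16480')
// -> [{ host: '10.20.30.40', port: 16479 }, { host: 'another-host', port: 16480 }]
```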
@@ -1,48 +0,0 @@
-module.exports = {
-    entry: './index.js',
-    target: 'node',
-    devtool: 'source-map',
-    output: {
-        filename: 'zenko-vitastor.js'
-    },
-    module: {
-        rules: [
-            {
-                test: /.jsx?$/,
-                use: {
-                    loader: 'babel-loader',
-                    options: {
-                        presets: [ [ "@babel/preset-env", { "targets": { "node": "12.0" }, "exclude": [ "transform-regenerator" ] } ] ],
-                    }
-                }
-            },
-            {
-                test: /.json$/,
-                type: 'json'
-            }
-        ]
-    },
-    externals: {
-        'leveldown': 'commonjs leveldown',
-        'bufferutil': 'commonjs bufferutil',
-        'diskusage': 'commonjs diskusage',
-        'utf-8-validate': 'commonjs utf-8-validate',
-        'fcntl': 'commonjs fcntl',
-        'ioctl': 'commonjs ioctl',
-        'vitastor': 'commonjs vitastor',
-        'vaultclient': 'commonjs vaultclient',
-        'bucketclient': 'commonjs bucketclient',
-        'scality-kms': 'commonjs scality-kms',
-        'sproxydclient': 'commonjs sproxydclient',
-        'hdclient': 'commonjs hdclient',
-        'cdmiclient': 'commonjs cdmiclient',
-        'kerberos': 'commonjs kerberos',
-        '@mongodb-js/zstd': 'commonjs @mongodb-js/zstd',
-        '@aws-sdk/credential-providers': 'commonjs @aws-sdk/credential-providers',
-        'snappy': 'commonjs snappy',
-        'mongodb-client-encryption': 'commonjs mongodb-client-encryption'
-    },
-    node: {
-        __dirname: false
-    }
-};
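This deleted config is what produced the fork's single-file `zenko-vitastor.js` bundle. Each name under `externals` is left as a plain `require()` call at runtime rather than being inlined, which is the standard webpack way to keep native addons (vitastor, diskusage, kerberos, and so on) out of the bundle. A minimal equivalent for one module, for reference:

```js
// Sketch: the bundle emits `require('vitastor')` verbatim, so the addon
// must be installed alongside the bundle rather than compiled into it.
module.exports = {
    target: 'node',
    externals: { vitastor: 'commonjs vitastor' },
};
```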