Compare commits


37 Commits

Author SHA1 Message Date
williamlardier d85efe8f79 s 2024-04-17 19:11:30 +02:00
williamlardier a45cb8340f CLDSRV-515: upgrade checkout version 2024-04-17 18:45:49 +02:00
williamlardier 156f971970 CLDSRV-515: add ft test for multi delete 2024-04-17 18:45:49 +02:00
williamlardier 2228657bbe CLDSRV-515: check quotas during multi deletes 2024-04-17 18:22:46 +02:00
williamlardier 0402466a06 [tmp] will be removed after rebase to latest branches 2024-04-15 16:20:10 +02:00
williamlardier 9dac38ef57 CLDSRV-515: improve quota API tooling 2024-04-15 16:02:30 +02:00
williamlardier f31e4a4b08 CLDSRV-515: install typescript in docker builder 2024-04-15 16:02:30 +02:00
williamlardier 98edc0c696 CLDSRV-515: add account quota evaluation logic and tests
Also converts the function to a callback function, to better
handle error within APIs.
2024-04-15 16:02:30 +02:00
williamlardier 21c9055fa1 CLDSRV-515: add functional tests 2024-04-15 16:02:30 +02:00
williamlardier 4429f37366 CLDSRV-515: add unit tests 2024-04-15 14:28:56 +02:00
williamlardier 9578819cac CLDSRV-515: add quota evaluation logic for buckets 2024-04-15 14:28:56 +02:00
williamlardier f5bcceda2c CLDSRV-515: bootstrap scuba on startup 2024-04-15 14:09:40 +02:00
williamlardier 98d06e4b1b CLDSRV-515: add a wrapper for scubaclient and healthchecks 2024-04-15 14:09:40 +02:00
williamlardier 6a6a7763d2 CLDSRV-515: handle scuba config 2024-04-15 14:09:40 +02:00
williamlardier 0b96e4ef4d CLDSRV-515: do not recreate variable at every authz 2024-04-15 14:09:40 +02:00
williamlardier 04e39940c0 CLDSRV-515: update dependencies 2024-04-15 14:09:40 +02:00
Maha Benzekri a9e65ef91e wip 2024-04-09 15:24:52 +02:00
Maha Benzekri 4dd2b06e10 wip 2024-04-09 15:21:53 +02:00
Maha Benzekri d2eafe4aa6 wip 2024-04-09 14:07:24 +02:00
Maha Benzekri be486d3303 fixup on legacy test 2024-04-05 15:49:26 +02:00
Maha Benzekri c8ade032c6 fixup on legacy test 2024-04-05 15:49:26 +02:00
Maha Benzekri b45c80fa18 fixup on legacy test 2024-04-05 15:49:25 +02:00
Maha Benzekri 8fb0569b5e wip 2024-04-05 15:49:25 +02:00
Maha Benzekri 28e697b95f wip 2024-04-05 15:49:25 +02:00
Maha Benzekri 9a4873379e wup 2024-04-05 15:49:25 +02:00
Maha Benzekri 9df036137d wip 2024-04-05 15:49:25 +02:00
Maha Benzekri 208bb0d3fb wip 2024-04-05 15:49:24 +02:00
Maha Benzekri 8b3bb32e8a wip 2024-04-05 15:49:24 +02:00
Maha Benzekri 7e3130c071 wip 2024-04-05 15:49:24 +02:00
Maha Benzekri fd9140e1d1 wip 2024-04-05 15:49:24 +02:00
Maha Benzekri 026cf9d4d1 wip 2024-04-05 15:49:24 +02:00
Maha Benzekri f36becbc25 wip 2024-04-05 15:49:23 +02:00
Maha Benzekri b6bea08b90 wip 2024-04-05 15:49:23 +02:00
Maha Benzekri 837cdb2705 wip 2024-04-05 15:49:23 +02:00
Maha Benzekri 6c5b5a0bf5 wip 2024-04-05 15:49:23 +02:00
Maha Benzekri 1ca4ffadd4 wip 2024-04-05 15:49:23 +02:00
Maha Benzekri f906076a0e wip 2024-04-05 15:49:22 +02:00
84 changed files with 9833 additions and 4221 deletions

View File

@ -16,7 +16,7 @@ runs:
run: |-
set -exu;
mkdir -p /tmp/artifacts/${JOB_NAME}/;
- uses: actions/setup-node@v4
- uses: actions/setup-node@v2
with:
node-version: '16'
cache: 'yarn'

View File

@ -43,8 +43,6 @@ services:
- SCUBA_HOST
- SCUBA_PORT
- SCUBA_HEALTHCHECK_FREQUENCY
- S3QUOTA
- QUOTA_ENABLE_INFLIGHTS
env_file:
- creds.env
depends_on:
@ -72,7 +70,7 @@ services:
pykmip:
network_mode: "host"
profiles: ['pykmip']
image: ${PYKMIP_IMAGE:-ghcr.io/scality/cloudserver/pykmip}
image: registry.scality.com/cloudserver-dev/pykmip
volumes:
- /tmp/artifacts/${JOB_NAME}:/artifacts
mongo:

View File

@ -1,3 +1,3 @@
FROM ghcr.io/scality/federation/sproxyd:7.10.6.8
FROM registry.scality.com/federation/sproxyd:7.10.6.8
ADD ./conf/supervisord.conf ./conf/nginx.conf ./conf/fastcgi_params ./conf/sproxyd0.conf /conf/
RUN chown root:root /conf/sproxyd0.conf

View File

@ -20,16 +20,13 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Render and test ${{ matrix.tests.name }}
uses: scality/action-prom-render-test@1.0.3
uses: scality/action-prom-render-test@1.0.1
with:
alert_file_path: monitoring/alerts.yaml
test_file_path: ${{ matrix.tests.file }}
alert_inputs: |
namespace=zenko
service=artesca-data-connector-s3api-metrics
reportJob=artesca-data-ops-report-handler
replicas=3
alert_inputs: >-
namespace=zenko,service=artesca-data-connector-s3api-metrics,replicas=3
github_token: ${{ secrets.GITHUB_TOKEN }}

View File

@ -14,12 +14,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
uses: github/codeql-action/init@v2
with:
languages: javascript, python, ruby
- name: Build and analyze
uses: github/codeql-action/analyze@v3
uses: github/codeql-action/analyze@v2

View File

@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: 'Dependency Review'
uses: actions/dependency-review-action@v4
uses: actions/dependency-review-action@v3

View File

@ -10,69 +10,58 @@ on:
required: true
env:
REGISTRY_NAME: registry.scality.com
PROJECT_NAME: ${{ github.event.repository.name }}
jobs:
build-federation-image:
runs-on: ubuntu-20.04
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
- name: Build and push image for federation
uses: docker/build-push-action@v5
with:
push: true
context: .
file: images/svc-base/Dockerfile
tags: |
ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}-svc-base
cache-from: type=gha,scope=federation
cache-to: type=gha,mode=max,scope=federation
uses: scality/workflows/.github/workflows/docker-build.yaml@v1
secrets: inherit
with:
push: true
registry: registry.scality.com
namespace: ${{ github.event.repository.name }}
name: ${{ github.event.repository.name }}
context: .
file: images/svc-base/Dockerfile
tag: ${{ github.event.inputs.tag }}-svc-base
release:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- name: Login to Registry
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
registry: ${{ env.REGISTRY_NAME }}
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Push dashboards into the production namespace
run: |
oras push ghcr.io/${{ github.repository }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
oras push ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}-dashboards:${{ github.event.inputs.tag }} \
dashboard.json:application/grafana-dashboard+json \
alerts.yaml:application/prometheus-alerts+yaml
working-directory: monitoring
- name: Build and push
uses: docker/build-push-action@v5
uses: docker/build-push-action@v4
with:
context: .
push: true
tags: ghcr.io/${{ github.repository }}:${{ github.event.inputs.tag }}
tags: ${{ env.REGISTRY_NAME }}/${{ env.PROJECT_NAME }}/${{ env.PROJECT_NAME }}:${{ github.event.inputs.tag }}
cache-from: type=gha
cache-to: type=gha,mode=max
- name: Create Release
uses: softprops/action-gh-release@v2
uses: softprops/action-gh-release@v1
env:
GITHUB_TOKEN: ${{ github.token }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
name: Release ${{ github.event.inputs.tag }}
tag_name: ${{ github.event.inputs.tag }}

View File

@ -67,24 +67,23 @@ env:
ENABLE_LOCAL_CACHE: "true"
REPORT_TOKEN: "report-token-1"
REMOTE_MANAGEMENT_DISABLE: "1"
# https://github.com/git-lfs/git-lfs/issues/5749
GIT_CLONE_PROTECTION_ACTIVE: 'false'
jobs:
linting-coverage:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- uses: actions/setup-node@v4
uses: actions/checkout@v3
- uses: actions/setup-node@v2
with:
node-version: '16'
cache: yarn
- name: install dependencies
run: yarn install --frozen-lockfile --network-concurrency 1
- uses: actions/setup-python@v5
- uses: actions/setup-python@v4
with:
python-version: '3.9'
- uses: actions/cache@v4
- uses: actions/cache@v3
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip
@ -117,7 +116,7 @@ jobs:
find . -name "*junit*.xml" -exec cp {} artifacts/junit/ ";"
if: always()
- name: Upload files to artifacts
uses: scality/action-artifacts@v4
uses: scality/action-artifacts@v2
with:
method: upload
url: https://artifacts.scality.net
@ -133,54 +132,46 @@ jobs:
packages: write
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
uses: docker/setup-buildx-action@v2
- name: Login to GitHub Registry
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Login to Registry
uses: docker/login-action@v2
with:
registry: registry.scality.com
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Build and push cloudserver image
uses: docker/build-push-action@v5
uses: docker/build-push-action@v4
with:
push: true
context: .
provenance: false
tags: |
ghcr.io/${{ github.repository }}:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
registry.scality.com/cloudserver-dev/cloudserver:${{ github.sha }}
cache-from: type=gha,scope=cloudserver
cache-to: type=gha,mode=max,scope=cloudserver
- name: Build and push pykmip image
uses: docker/build-push-action@v5
with:
push: true
context: .github/pykmip
tags: |
ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
labels: |
git.repository=${{ github.repository }}
git.commit-sha=${{ github.sha }}
cache-from: type=gha,scope=pykmip
cache-to: type=gha,mode=max,scope=pykmip
- name: Build and push MongoDB
uses: docker/build-push-action@v5
uses: docker/build-push-action@v4
with:
push: true
context: .github/docker/mongodb
tags: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
cache-from: type=gha,scope=mongodb
cache-to: type=gha,mode=max,scope=mongodb
cache-from: type=gha
cache-to: type=gha,mode=max
multiple-backend:
runs-on: ubuntu-latest
needs: build
env:
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
S3BACKEND: mem
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
@ -188,13 +179,13 @@ jobs:
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Login to Registry
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
registry: registry.scality.com
username: ${{ secrets.REGISTRY_LOGIN }}
password: ${{ secrets.REGISTRY_PASSWORD }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
@ -209,7 +200,7 @@ jobs:
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
uses: scality/action-artifacts@v3
with:
method: upload
url: https://artifacts.scality.net
@ -229,11 +220,11 @@ jobs:
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigTests.json
DEFAULT_BUCKET_KEY_FORMAT: v0
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
@ -247,7 +238,7 @@ jobs:
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
uses: scality/action-artifacts@v3
with:
method: upload
url: https://artifacts.scality.net
@ -268,11 +259,11 @@ jobs:
DEFAULT_BUCKET_KEY_FORMAT: v1
METADATA_MAX_CACHED_BUCKETS: 1
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
@ -287,7 +278,7 @@ jobs:
env:
S3_LOCATION_FILE: tests/locationConfig/locationConfigTests.json
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
uses: scality/action-artifacts@v3
with:
method: upload
url: https://artifacts.scality.net
@ -307,13 +298,13 @@ jobs:
env:
S3BACKEND: file
S3VAULT: mem
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
MPU_TESTING: "yes"
JOB_NAME: ${{ matrix.job-name }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup matrix job artifacts directory
@ -330,7 +321,7 @@ jobs:
bash wait_for_local_port.bash 8000 40
yarn run ft_test | tee /tmp/artifacts/${{ matrix.job-name }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
uses: scality/action-artifacts@v3
with:
method: upload
url: https://artifacts.scality.net
@ -344,14 +335,14 @@ jobs:
needs: build
env:
ENABLE_UTAPI_V2: t
S3BACKEND: mem
S3BACKEND: mem
BUCKET_DENY_FILTER: utapi-event-filter-deny-bucket
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
@ -363,7 +354,7 @@ jobs:
bash wait_for_local_port.bash 8000 40
yarn run test_utapi_v2 | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
uses: scality/action-artifacts@v3
with:
method: upload
url: https://artifacts.scality.net
@ -372,25 +363,15 @@ jobs:
source: /tmp/artifacts
if: always()
quota-tests:
scuba-tests:
runs-on: ubuntu-latest
needs: build
strategy:
matrix:
inflights:
- name: "With Inflights"
value: "true"
- name: "Without Inflights"
value: "false"
env:
S3METADATA: mongodb
S3BACKEND: mem
S3QUOTA: scuba
QUOTA_ENABLE_INFLIGHTS: ${{ matrix.inflights.value }}
SCUBA_HOST: localhost
SCUBA_PORT: 8100
SCUBA_HEALTHCHECK_FREQUENCY: 100
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
@ -399,15 +380,15 @@ jobs:
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Setup CI services
run: docker compose --profile mongo up -d
run: docker compose up -d
working-directory: .github/docker
- name: Run quota tests
- name: Run scuba tests
run: |-
set -ex -o pipefail;
bash wait_for_local_port.bash 8000 40
yarn run test_quota | tee /tmp/artifacts/${{ github.job }}/tests.log
yarn run test_scuba | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
uses: scality/action-artifacts@v3
with:
method: upload
url: https://artifacts.scality.net
@ -423,13 +404,12 @@ jobs:
S3BACKEND: file
S3VAULT: mem
MPU_TESTING: "yes"
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
PYKMIP_IMAGE: ghcr.io/${{ github.repository }}/pykmip:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- name: Copy KMIP certs
@ -445,7 +425,7 @@ jobs:
bash wait_for_local_port.bash 5696 40
yarn run ft_kmip | tee /tmp/artifacts/${{ github.job }}/tests.log
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
uses: scality/action-artifacts@v3
with:
method: upload
url: https://artifacts.scality.net
@ -453,7 +433,7 @@ jobs:
password: ${{ secrets.ARTIFACTS_PASSWORD }}
source: /tmp/artifacts
if: always()
ceph-backend-test:
runs-on: ubuntu-latest
needs: build
@ -465,17 +445,17 @@ jobs:
MPU_TESTING: "yes"
S3_LOCATION_FILE: /usr/src/app/tests/locationConfig/locationConfigCeph.json
MONGODB_IMAGE: ghcr.io/${{ github.repository }}/ci-mongodb:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}:${{ github.sha }}
CLOUDSERVER_IMAGE: ghcr.io/${{ github.repository }}/cloudserver:${{ github.sha }}
JOB_NAME: ${{ github.job }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v3
- name: Login to GitHub Registry
uses: docker/login-action@v3
uses: docker/login-action@v2
with:
registry: ghcr.io
username: ${{ github.repository_owner }}
password: ${{ github.token }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Setup CI environment
uses: ./.github/actions/setup-ci
- uses: ruby/setup-ruby@v1
@ -523,7 +503,7 @@ jobs:
S3VAULT: mem
S3METADATA: mongodb
- name: Upload logs to artifacts
uses: scality/action-artifacts@v4
uses: scality/action-artifacts@v3
with:
method: upload
url: https://artifacts.scality.net

README.md
View File

@ -1,7 +1,10 @@
# Zenko CloudServer with Vitastor Backend
# Zenko CloudServer
![Zenko CloudServer logo](res/scality-cloudserver-logo.png)
[![Docker Pulls][badgedocker]](https://hub.docker.com/r/zenko/cloudserver)
[![Twitter Follow][badgetwitter]](https://twitter.com/zenko)
## Overview
CloudServer (formerly S3 Server) is an open-source Amazon S3-compatible
@ -11,71 +14,137 @@ Scality's Open Source Multi-Cloud Data Controller.
CloudServer provides a single AWS S3 API interface to access multiple
backend data storage both on-premise or public in the cloud.
This repository contains a fork of CloudServer with [Vitastor](https://git.yourcmc.ru/vitalif/vitastor)
backend support.
CloudServer is useful for Developers, either to run as part of a
continuous integration test environment to emulate the AWS S3 service locally
or as an abstraction layer to develop object storage enabled
applications on the go.
## Quick Start with Vitastor
## Learn more at [www.zenko.io/cloudserver](https://www.zenko.io/cloudserver/)
Vitastor backend support is experimental; however, you can already try to
run it and write or read something, or even mount it with [GeeseFS](https://github.com/yandex-cloud/geesefs),
and it works too 😊.
## [May I offer you some lovely documentation?](http://s3-server.readthedocs.io/en/latest/)
Installation instructions:
## Docker
### Install Vitastor
[Run your Zenko CloudServer with Docker](https://hub.docker.com/r/zenko/cloudserver/)
Refer to [Vitastor Quick Start Manual](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/docs/intro/quickstart.en.md).
## Contributing
### Install Zenko with Vitastor Backend
In order to contribute, please follow the
[Contributing Guidelines](
https://github.com/scality/Guidelines/blob/master/CONTRIBUTING.md).
- Clone this repository: `git clone https://git.yourcmc.ru/vitalif/zenko-cloudserver-vitastor`
- Install dependencies: `npm install --omit=dev` or just `npm install`
- Clone Vitastor repository: `git clone https://git.yourcmc.ru/vitalif/vitastor`
- Build the Vitastor node.js binding by running `npm install` in the `node-binding` subdirectory of the Vitastor repository.
You need `node-gyp` and `vitastor-client-dev` (the Vitastor client library) for it to succeed.
- Symlink Vitastor module to Zenko: `ln -s /path/to/vitastor/node-binding /path/to/zenko/node_modules/vitastor`
## Installation
### Install and Configure MongoDB
### Dependencies
Refer to [MongoDB Manual](https://www.mongodb.com/docs/manual/installation/).
Building and running the Zenko CloudServer requires node.js 10.x and yarn v1.17.x.
Up-to-date versions can be found at
[Nodesource](https://github.com/nodesource/distributions).
### Setup Zenko
### Clone source code
- Create a separate pool for S3 object data in your Vitastor cluster: `vitastor-cli create-pool s3-data`
- Retrieve ID of the new pool from `vitastor-cli ls-pools --detail s3-data`
- In another pool, create an image for storing Vitastor volume metadata: `vitastor-cli create -s 10G s3-volume-meta`
- Copy `config.json.vitastor` to `config.json`, adjust it to match your domain
- Copy `authdata.json.example` to `authdata.json` - this is where you set S3 access & secret keys,
and also adjust them if you want to. Scality seems to use a separate auth service "Scality Vault" for
access keys, but it's not published, so let's use a file for now.
- Copy `locationConfig.json.vitastor` to `locationConfig.json` - this is where you set Vitastor cluster access data.
You should put correct values for `pool_id` (pool ID from the second step) and `metadata_image` (from the third step)
in this file.
Note: `locationConfig.json` in this version corresponds to storage classes (like STANDARD, COLD, etc.)
instead of "locations" (zones like us-east-1) as in the original Zenko CloudServer.
### Start Zenko
Start the S3 server with: `node index.js`
If you use default settings, Zenko CloudServer starts on port 8000.
The default access key is `accessKey1` with a secret key of `verySecretKey1`.
Now you can access your S3 with `s3cmd` or `geesefs`:
```
s3cmd --access_key=accessKey1 --secret_key=verySecretKey1 --host=http://localhost:8000 mb s3://testbucket
```shell
git clone https://github.com/scality/S3.git
```
```
AWS_ACCESS_KEY_ID=accessKey1 \
AWS_SECRET_ACCESS_KEY=verySecretKey1 \
geesefs --endpoint http://localhost:8000 testbucket mountdir
### Install js dependencies
Go to the ./S3 folder,
```shell
yarn install --frozen-lockfile
```
# Author & License
If you get an error regarding installation of the diskUsage module,
please install g++.
- [Zenko CloudServer](https://s3-server.readthedocs.io/en/latest/) author is Scality, licensed under [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
- [Vitastor](https://git.yourcmc.ru/vitalif/vitastor/) and Zenko Vitastor backend author is Vitaliy Filippov, licensed under [VNPL-1.1](https://git.yourcmc.ru/vitalif/vitastor/src/branch/master/VNPL-1.1.txt)
(a "network copyleft" license based on AGPL/SSPL, but worded in a better way)
If you get an error regarding level-down bindings, try clearing your yarn cache:
```shell
yarn cache clean
```
## Run it with a file backend
```shell
yarn start
```
This starts a Zenko CloudServer on port 8000. Two additional ports 9990 and
9991 are also open locally for internal transfer of metadata and data,
respectively.
The default access key is accessKey1 with
a secret key of verySecretKey1.
By default the metadata files will be saved in the
localMetadata directory and the data files will be saved
in the localData directory within the ./S3 directory on your
machine. These directories have been pre-created within the
repository. If you would like to save the data or metadata in
different locations of your choice, you must specify them with absolute paths.
So, when starting the server:
```shell
mkdir -m 700 $(pwd)/myFavoriteDataPath
mkdir -m 700 $(pwd)/myFavoriteMetadataPath
export S3DATAPATH="$(pwd)/myFavoriteDataPath"
export S3METADATAPATH="$(pwd)/myFavoriteMetadataPath"
yarn start
```
## Run it with multiple data backends
```shell
export S3DATA='multiple'
yarn start
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
With multiple backends, you have the ability to
choose where each object will be saved by setting
the following header with a locationConstraint on
a PUT request:
```shell
'x-amz-meta-scal-location-constraint':'myLocationConstraint'
```
If no header is sent with a PUT object request, the
location constraint of the bucket will determine
where the data is saved. If the bucket has no location
constraint, the endpoint of the PUT request will be
used to determine location.
See the Configuration section in our documentation
[here](http://s3-server.readthedocs.io/en/latest/GETTING_STARTED/#configuration)
to learn how to set location constraints.
## Run it with an in-memory backend
```shell
yarn run mem_backend
```
This starts a Zenko CloudServer on port 8000.
The default access key is accessKey1 with
a secret key of verySecretKey1.
## Run it with Vault user management
Note: Vault is proprietary and must be accessed separately.
```shell
export S3VAULT=vault
yarn start
```
This starts a Zenko CloudServer using Vault for user management.
[badgetwitter]: https://img.shields.io/twitter/follow/zenko.svg?style=social&label=Follow
[badgedocker]: https://img.shields.io/docker/pulls/scality/s3server.svg
[badgepub]: https://circleci.com/gh/scality/S3.svg?style=svg
[badgepriv]: http://ci.ironmann.io/gh/scality/S3.svg?style=svg&circle-token=1f105b7518b53853b5b7cf72302a3f75d8c598ae

bin/metrics_server.js Executable file
View File

@ -0,0 +1,46 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?metrics=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});
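The script configures itself entirely from the environment variables it destructures above; a hedged invocation (the endpoint, instance id, and token values are placeholders) looks like:
```shell
PUSH_ENDPOINT=https://push.example.com \
INSTANCE_ID=my-instance-id \
MANAGEMENT_TOKEN=my-token \
node bin/metrics_server.js
```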

bin/secure_channel_proxy.js Executable file
View File

@ -0,0 +1,46 @@
#!/usr/bin/env node
'use strict'; // eslint-disable-line strict
const {
startWSManagementClient,
startPushConnectionHealthCheckServer,
} = require('../lib/management/push');
const logger = require('../lib/utilities/logger');
const {
PUSH_ENDPOINT: pushEndpoint,
INSTANCE_ID: instanceId,
MANAGEMENT_TOKEN: managementToken,
} = process.env;
if (!pushEndpoint) {
logger.error('missing push endpoint env var');
process.exit(1);
}
if (!instanceId) {
logger.error('missing instance id env var');
process.exit(1);
}
if (!managementToken) {
logger.error('missing management token env var');
process.exit(1);
}
startPushConnectionHealthCheckServer(err => {
if (err) {
logger.error('could not start healthcheck server', { error: err });
process.exit(1);
}
const url = `${pushEndpoint}/${instanceId}/ws?proxy=1`;
startWSManagementClient(url, managementToken, err => {
if (err) {
logger.error('connection failed, exiting', { error: err });
process.exit(1);
}
logger.info('no more connection, exiting');
process.exit(0);
});
});

View File

@ -4,7 +4,6 @@
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"workers": 4,
"restEndpoints": {
"localhost": "us-east-1",
"127.0.0.1": "us-east-1",
@ -102,14 +101,6 @@
"readPreference": "primary",
"database": "metadata"
},
"authdata": "authdata.json",
"backends": {
"auth": "file",
"data": "file",
"metadata": "mongodb",
"kms": "file",
"quota": "none"
},
"externalBackends": {
"aws_s3": {
"httpAgent": {

View File

@ -1,71 +0,0 @@
{
"port": 8000,
"listenOn": [],
"metricsPort": 8002,
"metricsListenOn": [],
"replicationGroupId": "RG001",
"restEndpoints": {
"localhost": "STANDARD",
"127.0.0.1": "STANDARD",
"yourhostname.ru": "STANDARD"
},
"websiteEndpoints": [
"static.yourhostname.ru"
],
"replicationEndpoints": [ {
"site": "zenko",
"servers": ["127.0.0.1:8000"],
"default": true
} ],
"log": {
"logLevel": "info",
"dumpLevel": "error"
},
"healthChecks": {
"allowFrom": ["127.0.0.1/8", "::1"]
},
"backends": {
"metadata": "mongodb"
},
"mongodb": {
"replicaSetHosts": "127.0.0.1:27017",
"writeConcern": "majority",
"replicaSet": "rs0",
"readPreference": "primary",
"database": "s3",
"authCredentials": {
"username": "s3",
"password": ""
}
},
"externalBackends": {
"aws_s3": {
"httpAgent": {
"keepAlive": false,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
},
"gcp": {
"httpAgent": {
"keepAlive": true,
"keepAliveMsecs": 1000,
"maxFreeSockets": 256,
"maxSockets": null
}
}
},
"requests": {
"viaProxy": false,
"trustedProxyCIDRs": [],
"extractClientIPFromHeader": ""
},
"bucketNotificationDestinations": [
{
"resource": "target1",
"type": "dummy",
"host": "localhost:6000"
}
]
}

View File

@ -116,7 +116,7 @@ const constants = {
],
// user metadata header to set object locationConstraint
objectLocationConstraintHeader: 'x-amz-storage-class',
objectLocationConstraintHeader: 'x-amz-meta-scal-location-constraint',
lastModifiedHeader: 'x-amz-meta-x-scal-last-modified',
legacyLocations: ['sproxyd', 'legacy'],
// declare here all existing service accounts and their properties
@ -205,6 +205,9 @@ const constants = {
],
allowedUtapiEventFilterStates: ['allow', 'deny'],
allowedRestoreObjectRequestTierValues: ['Standard'],
validStorageClasses: [
'STANDARD',
],
lifecycleListing: {
CURRENT_TYPE: 'current',
NON_CURRENT_TYPE: 'noncurrent',

View File

@ -2,12 +2,11 @@
## Docker Image Generation
Docker images are hosted on [ghcr.io](https://github.com/orgs/scality/packages).
CloudServer has a few images there:
Docker images are hosted on [registry.scality.com](registry.scality.com).
CloudServer has two namespaces there:
* Cloudserver container image: ghcr.io/scality/cloudserver
* Dashboard oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
* Policies oras image: ghcr.io/scality/cloudserver/cloudser-dashboard
* Production Namespace: registry.scality.com/cloudserver
* Dev Namespace: registry.scality.com/cloudserver-dev
With every CI build, the CI will push images, tagging the
content with the developer branch's short SHA-1 commit hash.
@ -19,8 +18,8 @@ Tagged versions of cloudserver will be stored in the production namespace.
## How to Pull Docker Images
```sh
docker pull ghcr.io/scality/cloudserver:<commit hash>
docker pull ghcr.io/scality/cloudserver:<tag>
docker pull registry.scality.com/cloudserver-dev/cloudserver:<commit hash>
docker pull registry.scality.com/cloudserver/cloudserver:<tag>
```
## Release Process

View File

@ -1,4 +1,4 @@
FROM ghcr.io/scality/federation/nodesvc-base:7.10.6.0
FROM registry.scality.com/federation/nodesvc-base:7.10.6.0
ENV S3_CONFIG_FILE=${CONF_DIR}/config.json
ENV S3_LOCATION_FILE=${CONF_DIR}/locationConfig.json
@ -14,10 +14,8 @@ RUN rm -f ~/.gitconfig && \
git config --global --add safe.directory . && \
git lfs install && \
GIT_LFS_SKIP_SMUDGE=1 && \
yarn global add typescript && \
yarn install --frozen-lockfile --production --network-concurrency 1 && \
yarn cache clean --all && \
yarn global remove typescript
yarn cache clean --all
# run symlinking separately to avoid yarn installation errors
# we might have to check if the symlinking is really needed!

View File

@ -1,10 +1,10 @@
'use strict'; // eslint-disable-line strict
require('werelogs').stderrUtils.catchAndTimestampStderr(
undefined,
// Do not exit as workers have their own listener that will exit
// but the primary doesn't have another listener
require('cluster').isPrimary ? 1 : null,
);
/**
* Catch uncaught exceptions and add timestamp to aid debugging
*/
process.on('uncaughtException', err => {
process.stderr.write(`${new Date().toISOString()}: Uncaught exception: \n${err.stack}`);
});
require('./lib/server.js')();

View File

@ -107,47 +107,6 @@ function parseSproxydConfig(configSproxyd) {
return joi.attempt(configSproxyd, joiSchema, 'bad config');
}
function parseRedisConfig(redisConfig) {
const joiSchema = joi.object({
password: joi.string().allow(''),
host: joi.string(),
port: joi.number(),
retry: joi.object({
connectBackoff: joi.object({
min: joi.number().required(),
max: joi.number().required(),
jitter: joi.number().required(),
factor: joi.number().required(),
deadline: joi.number().required(),
}),
}),
// sentinel config
sentinels: joi.alternatives().try(
joi.string()
.pattern(/^[a-zA-Z0-9.-]+:[0-9]+(,[a-zA-Z0-9.-]+:[0-9]+)*$/)
.custom(hosts => hosts.split(',').map(item => {
const [host, port] = item.split(':');
return { host, port: Number.parseInt(port, 10) };
})),
joi.array().items(
joi.object({
host: joi.string().required(),
port: joi.number().required(),
})
).min(1),
),
name: joi.string(),
sentinelPassword: joi.string().allow(''),
})
.and('host', 'port')
.and('sentinels', 'name')
.xor('host', 'sentinels')
.without('sentinels', ['host', 'port'])
.without('host', ['sentinels', 'sentinelPassword']);
return joi.attempt(redisConfig, joiSchema, 'bad config');
}
function restEndpointsAssert(restEndpoints, locationConstraints) {
assert(typeof restEndpoints === 'object',
'bad config: restEndpoints must be an object of endpoints');
@ -377,7 +336,7 @@ function dmfLocationConstraintAssert(locationObj) {
function locationConstraintAssert(locationConstraints) {
const supportedBackends =
['mem', 'file', 'scality',
'mongodb', 'dmf', 'azure_archive', 'vitastor'].concat(Object.keys(validExternalBackends));
'mongodb', 'dmf', 'azure_archive'].concat(Object.keys(validExternalBackends));
assert(typeof locationConstraints === 'object',
'bad config: locationConstraints must be an object');
Object.keys(locationConstraints).forEach(l => {
@ -502,23 +461,27 @@ function locationConstraintAssert(locationConstraints) {
locationConstraints[l].details.connector.hdclient);
}
});
assert(Object.keys(locationConstraints)
.includes('us-east-1'), 'bad locationConfig: must ' +
'include us-east-1 as a locationConstraint');
}
function parseUtapiReindex(config) {
const {
enabled,
schedule,
redis,
sentinel,
bucketd,
onlyCountLatestWhenObjectLocked,
} = config;
assert(typeof enabled === 'boolean',
'bad config: utapi.reindex.enabled must be a boolean');
const parsedRedis = parseRedisConfig(redis);
assert(Array.isArray(parsedRedis.sentinels),
'bad config: utapi reindex redis config requires a list of sentinels');
'bad config: utapi.reindex.enabled must be a boolean');
assert(typeof sentinel === 'object',
'bad config: utapi.reindex.sentinel must be an object');
assert(typeof sentinel.port === 'number',
'bad config: utapi.reindex.sentinel.port must be a number');
assert(typeof sentinel.name === 'string',
'bad config: utapi.reindex.sentinel.name must be a string');
assert(typeof bucketd === 'object',
'bad config: utapi.reindex.bucketd must be an object');
assert(typeof bucketd.port === 'number',
@ -536,13 +499,6 @@ function parseUtapiReindex(config) {
'bad config: utapi.reindex.schedule must be a valid ' +
`cron schedule. ${e.message}.`);
}
return {
enabled,
schedule,
redis: parsedRedis,
bucketd,
onlyCountLatestWhenObjectLocked,
};
}
function requestsConfigAssert(requestsConfig) {
@ -630,6 +586,7 @@ class Config extends EventEmitter {
// Read config automatically
this._getLocationConfig();
this._getConfig();
this._configureBackends();
}
_getLocationConfig() {
@ -841,11 +798,11 @@ class Config extends EventEmitter {
this.websiteEndpoints = config.websiteEndpoints;
}
this.workers = false;
if (config.workers !== undefined) {
assert(Number.isInteger(config.workers) && config.workers > 0,
'bad config: workers must be a positive integer');
this.workers = config.workers;
this.clusters = false;
if (config.clusters !== undefined) {
assert(Number.isInteger(config.clusters) && config.clusters > 0,
'bad config: clusters must be a positive integer');
this.clusters = config.clusters;
}
if (config.usEastBehavior !== undefined) {
@ -1083,7 +1040,8 @@ class Config extends EventEmitter {
assert(typeof config.localCache.port === 'number',
'config: bad port for localCache. port must be a number');
if (config.localCache.password !== undefined) {
assert(typeof config.localCache.password === 'string',
assert(
this._verifyRedisPassword(config.localCache.password),
'config: bad password for localCache. password must' +
' be a string');
}
@ -1109,14 +1067,61 @@ class Config extends EventEmitter {
}
if (config.redis) {
this.redis = parseRedisConfig(config.redis);
if (config.redis.sentinels) {
this.redis = { sentinels: [], name: null };
assert(typeof config.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.redis.name = config.redis.name;
assert(Array.isArray(config.redis.sentinels) ||
typeof config.redis.sentinels === 'string',
'bad config: redis sentinels must be an array or string');
if (typeof config.redis.sentinels === 'string') {
config.redis.sentinels.split(',').forEach(item => {
const [host, port] = item.split(':');
this.redis.sentinels.push({ host,
port: Number.parseInt(port, 10) });
});
} else if (Array.isArray(config.redis.sentinels)) {
config.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.redis.sentinels.push({ host, port });
});
}
if (config.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(config.redis.sentinelPassword));
this.redis.sentinelPassword = config.redis.sentinelPassword;
}
} else {
// check for standalone configuration
this.redis = {};
assert(typeof config.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.redis.port === 'number',
'bad config: redis.port must be a number');
this.redis.host = config.redis.host;
this.redis.port = config.redis.port;
}
if (config.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.redis.password),
'bad config: invalid password for redis. password must ' +
'be a string');
this.redis.password = config.redis.password;
}
}
if (config.scuba) {
this.scuba = {};
if (config.scuba.host) {
assert(typeof config.scuba.host === 'string',
'bad config: scuba host must be a string');
this.scuba.host = config.scuba.host;
this.scuba = { host: config.scuba.host };
}
if (config.scuba.port) {
assert(Number.isInteger(config.scuba.port)
@ -1136,19 +1141,6 @@ class Config extends EventEmitter {
port: Number(process.env.SCUBA_PORT),
};
}
if (this.scuba) {
this.quotaEnabled = true;
}
const maxStaleness = Number(process.env.QUOTA_MAX_STALENESS_MS) ||
config.quota?.maxStatenessMS ||
24 * 60 * 60 * 1000;
assert(Number.isInteger(maxStaleness), 'bad config: maxStalenessMS must be an integer');
const enableInflights = process.env.QUOTA_ENABLE_INFLIGHTS === 'true' ||
config.quota?.enableInflights || false;
this.quota = {
maxStaleness,
enableInflights,
};
if (config.utapi) {
this.utapi = { component: 's3' };
if (config.utapi.host) {
@ -1177,8 +1169,50 @@ class Config extends EventEmitter {
assert(config.redis, 'missing required property of utapi ' +
'configuration: redis');
if (config.utapi.redis) {
this.utapi.redis = parseRedisConfig(config.utapi.redis);
if (this.utapi.redis.retry === undefined) {
if (config.utapi.redis.sentinels) {
this.utapi.redis = { sentinels: [], name: null };
assert(typeof config.utapi.redis.name === 'string',
'bad config: redis sentinel name must be a string');
this.utapi.redis.name = config.utapi.redis.name;
assert(Array.isArray(config.utapi.redis.sentinels),
'bad config: redis sentinels must be an array');
config.utapi.redis.sentinels.forEach(item => {
const { host, port } = item;
assert(typeof host === 'string',
'bad config: redis sentinel host must be a string');
assert(typeof port === 'number',
'bad config: redis sentinel port must be a number');
this.utapi.redis.sentinels.push({ host, port });
});
} else {
// check for standalone configuration
this.utapi.redis = {};
assert(typeof config.utapi.redis.host === 'string',
'bad config: redis.host must be a string');
assert(typeof config.utapi.redis.port === 'number',
'bad config: redis.port must be a number');
this.utapi.redis.host = config.utapi.redis.host;
this.utapi.redis.port = config.utapi.redis.port;
}
if (config.utapi.redis.retry !== undefined) {
if (config.utapi.redis.retry.connectBackoff !== undefined) {
const { min, max, jitter, factor, deadline } = config.utapi.redis.retry.connectBackoff;
assert.strictEqual(typeof min, 'number',
'utapi.redis.retry.connectBackoff: min must be a number');
assert.strictEqual(typeof max, 'number',
'utapi.redis.retry.connectBackoff: max must be a number');
assert.strictEqual(typeof jitter, 'number',
'utapi.redis.retry.connectBackoff: jitter must be a number');
assert.strictEqual(typeof factor, 'number',
'utapi.redis.retry.connectBackoff: factor must be a number');
assert.strictEqual(typeof deadline, 'number',
'utapi.redis.retry.connectBackoff: deadline must be a number');
}
this.utapi.redis.retry = config.utapi.redis.retry;
} else {
this.utapi.redis.retry = {
connectBackoff: {
min: 10,
@ -1189,6 +1223,22 @@ class Config extends EventEmitter {
},
};
}
if (config.utapi.redis.password !== undefined) {
assert(
this._verifyRedisPassword(config.utapi.redis.password),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.password = config.utapi.redis.password;
}
if (config.utapi.redis.sentinelPassword !== undefined) {
assert(
this._verifyRedisPassword(
config.utapi.redis.sentinelPassword),
'config: invalid password for utapi redis. password' +
' must be a string');
this.utapi.redis.sentinelPassword =
config.utapi.redis.sentinelPassword;
}
}
if (config.utapi.metrics) {
this.utapi.metrics = config.utapi.metrics;
@ -1258,7 +1308,8 @@ class Config extends EventEmitter {
}
if (config.utapi && config.utapi.reindex) {
this.utapi.reindex = parseUtapiReindex(config.utapi.reindex);
parseUtapiReindex(config.utapi.reindex);
this.utapi.reindex = config.utapi.reindex;
}
}
@ -1303,8 +1354,6 @@ class Config extends EventEmitter {
}
}
this.authdata = config.authdata || 'authdata.json';
this.kms = {};
if (config.kms) {
assert(typeof config.kms.userName === 'string');
@ -1524,6 +1573,25 @@ class Config extends EventEmitter {
this.outboundProxy.certs = certObj.certs;
}
this.managementAgent = {};
this.managementAgent.port = 8010;
this.managementAgent.host = 'localhost';
if (config.managementAgent !== undefined) {
if (config.managementAgent.port !== undefined) {
assert(Number.isInteger(config.managementAgent.port)
&& config.managementAgent.port > 0,
'bad config: managementAgent port must be a positive ' +
'integer');
this.managementAgent.port = config.managementAgent.port;
}
if (config.managementAgent.host !== undefined) {
assert.strictEqual(typeof config.managementAgent.host, 'string',
'bad config: management agent host must ' +
'be a string');
this.managementAgent.host = config.managementAgent.host;
}
}
// Ephemeral token to protect the reporting endpoint:
// try inherited from parent first, then hardcoded in conf file,
// then create a fresh one as last resort.
@ -1613,8 +1681,6 @@ class Config extends EventEmitter {
'bad config: maxScannedLifecycleListingEntries must be greater than 2');
this.maxScannedLifecycleListingEntries = config.maxScannedLifecycleListingEntries;
}
this._configureBackends(config);
}
_setTimeOptions() {
@ -1653,43 +1719,40 @@ class Config extends EventEmitter {
}
_getAuthData() {
return JSON.parse(fs.readFileSync(findConfigFile(process.env.S3AUTH_CONFIG || this.authdata), { encoding: 'utf-8' }));
return require(findConfigFile(process.env.S3AUTH_CONFIG || 'authdata.json'));
}
_configureBackends(config) {
const backends = config.backends || {};
_configureBackends() {
/**
* Configure the backends for Authentication, Data and Metadata.
*/
let auth = backends.auth || 'mem';
let data = backends.data || 'multiple';
let metadata = backends.metadata || 'file';
let kms = backends.kms || 'file';
let quota = backends.quota || 'none';
let auth = 'mem';
let data = 'multiple';
let metadata = 'file';
let kms = 'file';
if (process.env.S3BACKEND) {
const validBackends = ['mem', 'file', 'scality', 'cdmi'];
assert(validBackends.indexOf(process.env.S3BACKEND) > -1,
'bad environment variable: S3BACKEND environment variable ' +
'should be one of mem/file/scality/cdmi'
);
auth = process.env.S3BACKEND == 'scality' ? 'scality' : 'mem';
auth = process.env.S3BACKEND;
data = process.env.S3BACKEND;
metadata = process.env.S3BACKEND;
kms = process.env.S3BACKEND;
}
if (process.env.S3VAULT) {
auth = process.env.S3VAULT;
auth = (auth === 'file' || auth === 'mem' || auth === 'cdmi' ? 'mem' : auth);
}
if (auth === 'file' || auth === 'mem' || auth === 'cdmi') {
// Auth only checks for 'mem' since mem === file
auth = 'mem';
let authData;
if (process.env.SCALITY_ACCESS_KEY_ID &&
process.env.SCALITY_SECRET_ACCESS_KEY) {
process.env.SCALITY_SECRET_ACCESS_KEY) {
authData = buildAuthDataAccount(
process.env.SCALITY_ACCESS_KEY_ID,
process.env.SCALITY_SECRET_ACCESS_KEY);
process.env.SCALITY_ACCESS_KEY_ID,
process.env.SCALITY_SECRET_ACCESS_KEY);
} else {
authData = this._getAuthData();
}
@ -1697,7 +1760,7 @@ class Config extends EventEmitter {
throw new Error('bad config: invalid auth config file.');
}
this.authData = authData;
} else if (auth === 'multiple') {
} else if (auth === 'multiple') {
const authData = this._getAuthData();
if (validateAuthConfig(authData)) {
throw new Error('bad config: invalid auth config file.');
@ -1712,9 +1775,9 @@ class Config extends EventEmitter {
'should be one of mem/file/scality/multiple'
);
data = process.env.S3DATA;
if (data === 'scality' || data === 'multiple') {
data = 'multiple';
}
}
if (data === 'scality' || data === 'multiple') {
data = 'multiple';
}
assert(this.locationConstraints !== undefined &&
this.restEndpoints !== undefined,
@ -1727,18 +1790,18 @@ class Config extends EventEmitter {
if (process.env.S3KMS) {
kms = process.env.S3KMS;
}
if (process.env.S3QUOTA) {
quota = process.env.S3QUOTA;
}
this.backends = {
auth,
data,
metadata,
kms,
quota,
};
}
_verifyRedisPassword(password) {
return typeof password === 'string';
}
setAuthDataAccounts(accounts) {
this.authData.accounts = accounts;
this.emit('authdata-update');
@ -1861,19 +1924,10 @@ class Config extends EventEmitter {
.update(instanceId)
.digest('hex');
}
isQuotaEnabled() {
return !!this.quotaEnabled;
}
isQuotaInflightEnabled() {
return this.quota.enableInflights;
}
}
module.exports = {
parseSproxydConfig,
parseRedisConfig,
locationConstraintAssert,
ConfigObject: Config,
config: new Config(),
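Taken together, the scuba and quota parsing in this diff corresponds to a `config.json` fragment like the following sketch (key names are taken from the parser above; note it reads `maxStatenessMS` exactly as spelled here, and `SCUBA_HOST`/`SCUBA_PORT`, `QUOTA_MAX_STALENESS_MS`, `QUOTA_ENABLE_INFLIGHTS`, and `S3QUOTA` act as environment overrides):
```json
{
    "scuba": {
        "host": "localhost",
        "port": 8100
    },
    "quota": {
        "maxStatenessMS": 86400000,
        "enableInflights": true
    },
    "backends": {
        "quota": "scuba"
    }
}
```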

View File

@ -85,10 +85,6 @@ const api = {
// Attach the apiMethod method to the request, so it can used by monitoring in the server
// eslint-disable-next-line no-param-reassign
request.apiMethod = apiMethod;
// Array of end of API callbacks, used to perform some logic
// at the end of an API.
// eslint-disable-next-line no-param-reassign
request.finalizerHooks = [];
const actionLog = monitoringMap[apiMethod];
if (!actionLog &&
@ -155,6 +151,7 @@ const api = {
function checkAuthResults(authResults) {
let returnTagCount = true;
const isImplicitDeny = {};
let accountQuotas = {};
let isOnlyImplicitDeny = true;
if (apiMethod === 'objectGet') {
// first item checks s3:GetObject(Version) action
@ -187,27 +184,25 @@ const api = {
}
}
}
accountQuotas = authResults?.[0]?.accountQuota;
// These two APIs cannot use ACLs or Bucket Policies, hence, any
// implicit deny from vault must be treated as an explicit deny.
if ((apiMethod === 'bucketPut' || apiMethod === 'serviceGet') && isOnlyImplicitDeny) {
return errors.AccessDenied;
}
return { returnTagCount, isImplicitDeny };
return { returnTagCount, isImplicitDeny, accountQuotas };
}
return async.waterfall([
next => auth.server.doAuth(
request, log, (err, userInfo, authorizationResults, streamingV4Params, infos) => {
request, log, (err, userInfo, authorizationResults, streamingV4Params) => {
if (err) {
// VaultClient returns standard errors, but the route requires
// Arsenal errors
const arsenalError = err.metadata ? err : errors[err.code] || errors.InternalError;
log.trace('authentication error', { error: err });
return next(arsenalError);
return next(err);
}
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
return next(null, userInfo, authorizationResults, streamingV4Params);
}, 's3', requestContexts),
(userInfo, authorizationResults, streamingV4Params, infos, next) => {
(userInfo, authorizationResults, streamingV4Params, next) => {
const authNames = { accountName: userInfo.getAccountDisplayName() };
if (userInfo.isRequesterAnIAMUser()) {
authNames.userName = userInfo.getIAMdisplayName();
@ -217,7 +212,7 @@ const api = {
}
log.addDefaultFields(authNames);
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
return next(null, userInfo, authorizationResults, streamingV4Params);
}
// issue 100 Continue to the client
writeContinue(request, response);
@ -248,12 +243,12 @@ const api = {
}
// Convert array of post buffers into one string
request.post = Buffer.concat(post, postLength).toString();
return next(null, userInfo, authorizationResults, streamingV4Params, infos);
return next(null, userInfo, authorizationResults, streamingV4Params);
});
return undefined;
},
// Tag condition keys require information from CloudServer for evaluation
(userInfo, authorizationResults, streamingV4Params, infos, next) => tagConditionKeyAuth(
(userInfo, authorizationResults, streamingV4Params, next) => tagConditionKeyAuth(
authorizationResults,
request,
requestContexts,
@ -264,14 +259,13 @@ const api = {
log.trace('tag authentication error', { error: err });
return next(err);
}
return next(null, userInfo, authResultsWithTags, streamingV4Params, infos);
return next(null, userInfo, authResultsWithTags, streamingV4Params);
},
),
], (err, userInfo, authorizationResults, streamingV4Params, infos) => {
], (err, userInfo, authorizationResults, streamingV4Params) => {
if (err) {
return callback(err);
}
request.accountQuotas = infos?.accountQuota;
if (authorizationResults) {
const checkedResults = checkAuthResults(authorizationResults);
if (checkedResults instanceof Error) {
@ -279,6 +273,7 @@ const api = {
}
returnTagCount = checkedResults.returnTagCount;
request.actionImplicitDenies = checkedResults.isImplicitDeny;
request.accountQuotas = checkedResults.accountQuotas;
} else {
// create an object of keys apiMethods with all values to false:
// for backward compatibility, all apiMethods are allowed by default
@ -287,24 +282,24 @@ const api = {
acc[curr] = false;
return acc;
}, {});
request.accountQuotas = apiMethods.reduce((acc, curr) => {
acc[curr] = undefined;
return acc;
}, {});
}
const methodCallback = (err, ...results) => async.forEachLimit(request.finalizerHooks, 5,
(hook, done) => hook(err, done),
() => callback(err, ...results));
if (apiMethod === 'objectPut' || apiMethod === 'objectPutPart') {
request._response = response;
return this[apiMethod](userInfo, request, streamingV4Params,
log, methodCallback, authorizationResults);
log, callback, authorizationResults);
}
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
return this[apiMethod](userInfo, request, sourceBucket,
sourceObject, sourceVersionId, log, methodCallback);
sourceObject, sourceVersionId, log, callback);
}
if (apiMethod === 'objectGet') {
return this[apiMethod](userInfo, request, returnTagCount, log, callback);
}
return this[apiMethod](userInfo, request, log, methodCallback);
return this[apiMethod](userInfo, request, log, callback);
});
},
bucketDelete,
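The `finalizerHooks` array introduced above lets an API register cleanup logic that runs after the handler finishes, before the response callback fires. A minimal sketch of the pattern (everything outside the diff, such as the hook body, is hypothetical):
```js
const async = require('async');

// cloudserver attaches the array to the incoming request object.
const request = { finalizerHooks: [] };

// An API (e.g. a quota check) registers a hook to run once the call ends:
request.finalizerHooks.push((apiErr, done) => {
    console.log('cleanup after API, error was:', apiErr || 'none');
    done();
});

// The dispatcher wraps the API callback so every hook runs, in batches
// of five, before the original callback receives the results:
function finalize(err, callback, ...results) {
    async.forEachLimit(request.finalizerHooks, 5,
        (hook, done) => hook(err, done),
        () => callback(err, ...results));
}

finalize(null, err => console.log('response sent, err:', err));
```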

View File

@ -1,8 +1,9 @@
const { evaluators, actionMaps, RequestContext, requestUtils } = require('arsenal').policies;
const { evaluators, actionMaps, actionNeedQuotaCheck, RequestContext, requestUtils } = require('arsenal').policies;
const { errors } = require('arsenal');
const { parseCIDR, isValid } = require('ipaddr.js');
const constants = require('../../../../constants');
const { config } = require('../../../Config');
const { ScubaClientInstance } = require('../../../scuba/wrapper');
const {
allAuthedUsersId,
@ -372,6 +373,124 @@ function processBucketPolicy(requestType, bucket, canonicalID, arn, bucketOwner,
return processedResult;
}
/**
*
* @param {BucketInfo} bucket - bucket object
* @param {Account} account - account object
* @param {array} apiNames - action names: operations to authorize
* @param {string} apiMethod - the main API call
* @param {number} inflight - inflight bytes
* @param {Logger} log - logger
* @param {function} callback - callback
* @returns {undefined} - the result is returned through the callback
*/
async function validateQuotas(bucket, account, apiNames, apiMethod, inflight, log, callback) {
console.log('evaluate quota with', bucket, account, apiNames, apiMethod, inflight)
const bucketQuota = bucket.getQuota();
const accountQuota = account?.quota || 0;
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
if ((bucketQuota <= 0 && accountQuota <= 0) || !ScubaClientInstance?.enabled) {
if (bucketQuota > 0 || accountQuota > 0) {
log.warn('quota is set for a bucket, but scuba is disabled', {
bucketName: bucket.getName(),
});
}
return callback();
}
const creationDate = new Date(bucket.getCreationDate()).getTime();
try {
// A potential optimization, if inflights are disabled, is to only evaluate
// the lowest quota.
// eslint-disable-next-line no-restricted-syntax
for (const apiName of apiNames) {
let shouldEvaluateCopyObject = false;
if (apiName === 'objectGet' && (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart')) {
shouldEvaluateCopyObject = true;
// eslint-disable-next-line no-param-reassign
inflight = Math.abs(inflight);
}
if (!shouldEvaluateCopyObject && !actionNeedQuotaCheck[apiName]) {
continue;
}
// eslint-disable-next-line no-await-in-loop
const bucketMetrics = await ScubaClientInstance.getLatestMetrics('bucket',
`${bucket.getName()}_${creationDate}`, null, {
action: apiName,
inflight,
});
if (bucketMetrics.bytesTotal > bucketQuota) {
log.debug('Bucket quota exceeded', {
bucket: bucket.getName(),
action: apiName,
inflight,
quota: bucketQuota,
bytesTotal: bucketMetrics.bytesTotal,
});
bucketQuotaExceeded = true;
}
if (accountQuota > 0 && account?.account) {
// eslint-disable-next-line no-await-in-loop
const accountMetrics = await ScubaClientInstance.getLatestMetrics('account',
`${account.account}_${creationDate}`, null, {
action: apiName,
inflight,
});
if (accountMetrics.bytesTotal > account.quota) {
log.debug('Account quota exceeded', {
accountId: account.account,
action: apiName,
inflight,
quota: account.quota,
bytesTotal: accountMetrics.bytesTotal,
});
accountQuotaExceeded = true;
}
}
}
if (bucketQuotaExceeded || accountQuotaExceeded) {
if (apiMethod?.endsWith('Delete')) {
return callback();
}
// clean any inflight bytes
if (inflight > 0) {
// eslint-disable-next-line no-await-in-loop
await ScubaClientInstance.getLatestMetrics('bucket',
`${bucket.getName()}_${creationDate}`, null, {
action: apiMethod,
inflight: -inflight,
});
if (account?.quota) {
// eslint-disable-next-line no-await-in-loop
await ScubaClientInstance.getLatestMetrics('account',
`${account.account}_${creationDate}`, null, {
action: apiMethod,
inflight: -inflight,
});
}
}
return callback(errors.QuotaExceeded);
}
return callback();
} catch (err) {
log.warn('Error getting metrics from scuba, allowing the request', {
error: err.name,
description: err.message,
});
if (bucketQuotaExceeded || accountQuotaExceeded) {
return callback(errors.QuotaExceeded);
}
return callback();
}
}
function isBucketAuthorized(bucket, requestTypesInput, canonicalID, authInfo, log, request,
actionImplicitDeniesInput = {}, isWebsite = false) {
const requestTypes = Array.isArray(requestTypesInput) ? requestTypesInput : [requestTypesInput];
@ -626,6 +745,7 @@ function isLifecycleSession(arn) {
}
module.exports = {
validateQuotas,
isBucketAuthorized,
isObjAuthorized,
getServiceAccountProperties,
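To make the calling convention of `validateQuotas` concrete, here is a hedged sketch of how an authorization path could invoke it; the bucket, account, and log stubs are hypothetical and only model the accessors the function actually uses:
```js
// Hypothetical stand-ins for the real BucketInfo/account/logger objects:
const bucket = {
    getQuota: () => 1024 * 1024, // 1 MiB bucket quota
    getName: () => 'demo-bucket',
    getCreationDate: () => '2024-04-15T00:00:00.000Z',
};
const account = { account: 'account-id', quota: 10 * 1024 * 1024 };
const log = { warn: console.warn, debug: console.debug };
const contentLength = 1024; // bytes about to be written (inflight)

// No error from the callback means the write may proceed;
// errors.QuotaExceeded means the quota would be breached.
validateQuotas(bucket, account, ['objectPut'], 'objectPut',
    contentLength, log, err => {
        if (err) {
            // reject the request (QuotaExceeded)
        }
        // otherwise continue with the write
    });
```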

View File

@ -52,7 +52,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
apiMethod, 's3');
}
if (apiMethod === 'bucketPut') {
if (apiMethod === 'multiObjectDelete' || apiMethod === 'bucketPut') {
return null;
}
@ -65,17 +65,7 @@ function prepareRequestContexts(apiMethod, request, sourceBucket,
const requestContexts = [];
if (apiMethod === 'multiObjectDelete') {
// MultiObjectDelete does not require any authorization when evaluating
// the API. Instead, we authorize each object passed.
// But in order to get any relevant information from the authorization service
// for example, the account quota, we must send a request context object
// with no `specificResource`. We expect the result to be an implicit deny.
// In the API, we then ignore these authorization results, and we can use
// any information returned, e.g., the quota.
const requestContextMultiObjectDelete = generateRequestContext('objectDelete');
requestContexts.push(requestContextMultiObjectDelete);
} else if (apiMethodAfterVersionCheck === 'objectCopy'
if (apiMethodAfterVersionCheck === 'objectCopy'
|| apiMethodAfterVersionCheck === 'objectPutCopyPart') {
const objectGetAction = sourceVersionId ? 'objectGetVersion' :
'objectGet';

View File

@ -8,7 +8,7 @@ const { pushMetric } = require('../../../utapi/utilities');
const { decodeVersionId } = require('./versioning');
const collectCorsHeaders = require('../../../utilities/collectCorsHeaders');
const { parseRestoreRequestXml } = s3middleware.objectRestore;
const { processBytesToWrite, validateQuotas } = require('../quotas/quotaUtils');
/**
* Check if tier is supported
@ -59,13 +59,6 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
objectKey,
versionId: decodedVidResult,
requestType: request.apiMethods || 'restoreObject',
/**
* Restoring an object might not cause any impact on
* the storage, if the object is already restored: in
* this case, the duration is extended. We disable the
* quota evaluation and trigger it manually.
*/
checkQuota: false,
request,
};
@ -124,16 +117,6 @@ function objectRestore(metadata, mdUtils, userInfo, request, log, callback) {
return next(err, bucketMD, objectMD);
});
},
function evaluateQuotas(bucketMD, objectMD, next) {
if (isObjectRestored) {
return next(null, bucketMD, objectMD);
}
const actions = Array.isArray(mdValueParams.requestType) ?
mdValueParams.requestType : [mdValueParams.requestType];
const bytes = processBytesToWrite(request.apiMethod, bucketMD, mdValueParams.versionId, 0, objectMD);
return validateQuotas(request, bucketMD, request.accountQuotas, actions, request.apiMethod, bytes,
false, log, err => next(err, bucketMD, objectMD));
},
function updateObjectMD(bucketMD, objectMD, next) {
const params = objectMD.versionId ? { versionId: objectMD.versionId } : {};
metadata.putObjectMD(bucketMD.getName(), objectKey, objectMD, params,

View File

@ -1,314 +0,0 @@
const async = require('async');
const { errors } = require('arsenal');
const monitoring = require('../../../utilities/monitoringHandler');
const {
actionNeedQuotaCheckCopy,
actionNeedQuotaCheck,
actionWithDataDeletion,
} = require('arsenal').policies;
const { config } = require('../../../Config');
const QuotaService = require('../../../quotas/quotas');
/**
* Process the bytes to write based on the request and object metadata
* @param {string} apiMethod - api method
* @param {BucketInfo} bucket - bucket info
* @param {string} versionId - version id of the object
* @param {number} contentLength - content length of the object
* @param {object} objMD - object metadata
* @param {object} destObjMD - destination object metadata
* @return {number} processed content length
*/
function processBytesToWrite(apiMethod, bucket, versionId, contentLength, objMD, destObjMD = null) {
let bytes = contentLength;
if (apiMethod === 'objectRestore') {
// object is being restored
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bytes && objMD?.['content-length']) {
if (apiMethod === 'objectCopy' || apiMethod === 'objectPutCopyPart') {
if (!destObjMD || bucket.isVersioningEnabled()) {
// object is being copied
bytes = Number.parseInt(objMD['content-length'], 10);
} else if (!bucket.isVersioningEnabled()) {
// object is being copied and replaces the target
bytes = Number.parseInt(objMD['content-length'], 10) -
Number.parseInt(destObjMD['content-length'], 10);
}
} else if (!bucket.isVersioningEnabled() || bucket.isVersioningEnabled() && versionId) {
// object is being deleted
bytes = -Number.parseInt(objMD['content-length'], 10);
}
} else if (bytes && objMD?.['content-length'] && !bucket.isVersioningEnabled()) {
// object is being replaced: store the diff, if the bucket is not versioned
bytes = bytes - Number.parseInt(objMD['content-length'], 10);
}
return bytes || 0;
}
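
To make the byte accounting concrete, a few illustrative cases, assuming a
non-versioned bucket and an existing 100-byte object (values are made up):

// replacement: a 40-byte PUT over the 100-byte object stores the diff
processBytesToWrite('objectPut', bucket, undefined, 40, objMD); // => -60
// deletion: the object's size is subtracted
processBytesToWrite('objectDelete', bucket, undefined, 0, objMD); // => -100
// restore: the restored size is counted in full
processBytesToWrite('objectRestore', bucket, undefined, 0, objMD); // => 100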
/**
* Checks if a metric is stale based on the provided parameters.
*
* @param {Object} metric - The metric object to check.
* @param {string} resourceType - The type of the resource.
* @param {string} resourceName - The name of the resource.
* @param {string} action - The action being performed.
* @param {number} inflight - The number of inflight requests.
* @param {Object} log - The logger object.
* @returns {boolean} Returns true if the metric is stale, false otherwise.
*/
function isMetricStale(metric, resourceType, resourceName, action, inflight, log) {
if (metric.date && Date.now() - new Date(metric.date).getTime() >
QuotaService.maxStaleness) {
log.warn('Stale metrics from the quota service, allowing the request', {
resourceType,
resourceName,
action,
inflight,
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
return true;
}
return false;
}
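
An illustrative check (the date is deliberately ancient, so it exceeds any
plausible QuotaService.maxStaleness):

const staleMetric = { bytesTotal: 0, date: new Date(0).toISOString() };
isMetricStale(staleMetric, 'bucket', 'my-bucket', 'objectPut', 1024, log); // => true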
/**
 * Evaluates quotas for a bucket and an account and updates the inflight count.
*
* @param {number} bucketQuota - The quota limit for the bucket.
* @param {number} accountQuota - The quota limit for the account.
* @param {object} bucket - The bucket object.
* @param {object} account - The account object.
* @param {number} inflight - The number of inflight requests.
* @param {number} inflightForCheck - The number of inflight requests for checking quotas.
* @param {string} action - The action being performed.
* @param {object} log - The logger object.
* @param {function} callback - The callback function to be called when evaluation is complete.
* @returns {object} - The result of the evaluation.
*/
function _evaluateQuotas(
bucketQuota,
accountQuota,
bucket,
account,
inflight,
inflightForCheck,
action,
log,
callback,
) {
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
const creationDate = new Date(bucket.getCreationDate()).getTime();
return async.parallel({
bucketQuota: parallelDone => {
if (bucketQuota > 0) {
return QuotaService.getUtilizationMetrics('bucket',
`${bucket.getName()}_${creationDate}`, null, {
action,
inflight,
}, (err, bucketMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(bucketMetrics, 'bucket', bucket.getName(), action, inflight, log) &&
bucketMetrics.bytesTotal + inflightForCheck > bucketQuota) {
log.debug('Bucket quota exceeded', {
bucket: bucket.getName(),
action,
inflight,
quota: bucketQuota,
bytesTotal: bucketMetrics.bytesTotal,
});
bucketQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
accountQuota: parallelDone => {
if (accountQuota > 0 && account?.account) {
return QuotaService.getUtilizationMetrics('account',
account.account, null, {
action,
inflight,
}, (err, accountMetrics) => {
if (err || inflight < 0) {
return parallelDone(err);
}
if (!isMetricStale(accountMetrics, 'account', account.account, action, inflight, log) &&
accountMetrics.bytesTotal + inflightForCheck > accountQuota) {
log.debug('Account quota exceeded', {
accountId: account.account,
action,
inflight,
quota: accountQuota,
bytesTotal: accountMetrics.bytesTotal,
});
accountQuotaExceeded = true;
}
return parallelDone();
});
}
return parallelDone();
},
}, err => {
if (err) {
log.warn('Error evaluating quotas', {
error: err.name,
description: err.message,
isInflightDeletion: inflight < 0,
});
}
return callback(err, bucketQuotaExceeded, accountQuotaExceeded);
});
}
/**
* Monitors the duration of quota evaluation for a specific API method.
*
* @param {string} apiMethod - The name of the API method being monitored.
* @param {string} type - The type of quota being evaluated.
* @param {string} code - The code associated with the quota being evaluated.
* @param {number} duration - The duration of the quota evaluation in nanoseconds.
* @returns {undefined} - Returns nothing.
*/
function monitorQuotaEvaluationDuration(apiMethod, type, code, duration) {
monitoring.quotaEvaluationDuration.labels({
action: apiMethod,
type,
code,
}).observe(duration / 1e9);
}
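
A hedged sketch of how a caller might time an evaluation with this helper
(the 200 code and the action name are illustrative):

const start = process.hrtime.bigint();
// ... quota evaluation runs here ...
monitorQuotaEvaluationDuration('objectPut', 'bucket', 200,
    Number(process.hrtime.bigint() - start)); // duration in nanoseconds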
/**
*
* @param {Request} request - request object
* @param {BucketInfo} bucket - bucket object
* @param {Account} account - account object
* @param {array} apiNames - action names: operations to authorize
* @param {string} apiMethod - the main API call
* @param {number} inflight - inflight bytes
* @param {boolean} isStorageReserved - Flag to check if the current quota, minus
 * the incoming bytes, is under the limit.
* @param {Logger} log - logger
* @param {function} callback - callback function
 * @returns {undefined} - the result is passed to the callback (errors.QuotaExceeded on quota breach)
*/
function validateQuotas(request, bucket, account, apiNames, apiMethod, inflight, isStorageReserved, log, callback) {
if (!config.isQuotaEnabled() || (!inflight && isStorageReserved)) {
return callback(null);
}
let type;
let bucketQuotaExceeded = false;
let accountQuotaExceeded = false;
let quotaEvaluationDuration;
const requestStartTime = process.hrtime.bigint();
const bucketQuota = bucket.getQuota();
const accountQuota = account?.quota || 0;
const shouldSendInflights = config.isQuotaInflightEnabled();
if (bucketQuota && accountQuota) {
type = 'bucket+account';
} else if (bucketQuota) {
type = 'bucket';
} else {
type = 'account';
}
if (actionWithDataDeletion[apiMethod]) {
type = 'delete';
}
if ((bucketQuota <= 0 && accountQuota <= 0) || !QuotaService?.enabled) {
if (bucketQuota > 0 || accountQuota > 0) {
log.warn('quota is set for a bucket, but the quota service is disabled', {
bucketName: bucket.getName(),
});
monitoring.requestWithQuotaMetricsUnavailable.inc();
}
return callback(null);
}
if (isStorageReserved) {
// eslint-disable-next-line no-param-reassign
inflight = 0;
}
return async.forEach(apiNames, (apiName, done) => {
// Object copy operations first check the target object,
// meaning the source object, containing the current bytes,
// is checked second. This logic handles these API calls by
// ensuring the bytes are positive (i.e., not an object
// replacement).
if (actionNeedQuotaCheckCopy(apiName, apiMethod)) {
// eslint-disable-next-line no-param-reassign
inflight = Math.abs(inflight);
} else if (!actionNeedQuotaCheck[apiName] && !actionWithDataDeletion[apiName]) {
return done();
}
// When inflights are disabled, the sum of the current utilization metrics
// and the current bytes is compared with the quota; the current bytes
// are not sent to the utilization service. When inflights are enabled,
// only the current utilization metrics are compared with the quota,
// as they already include the inflight bytes sent with the request.
let _inflights = shouldSendInflights ? inflight : undefined;
const inflightForCheck = shouldSendInflights ? 0 : inflight;
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
inflightForCheck, apiName, log,
(err, _bucketQuotaExceeded, _accountQuotaExceeded) => {
if (err) {
return done(err);
}
bucketQuotaExceeded = _bucketQuotaExceeded;
accountQuotaExceeded = _accountQuotaExceeded;
// Inflights are inverted: in case of cleanup, we just re-issue
// the same API call.
if (_inflights) {
_inflights = -_inflights;
}
request.finalizerHooks.push((errorFromAPI, _done) => {
const code = (bucketQuotaExceeded || accountQuotaExceeded) ? 429 : 200;
const quotaCleanUpStartTime = process.hrtime.bigint();
// Quotas are cleaned only in case of error in the API
async.waterfall([
cb => {
if (errorFromAPI) {
return _evaluateQuotas(bucketQuota, accountQuota, bucket, account, _inflights,
null, apiName, log, cb);
}
return cb();
},
], () => {
monitorQuotaEvaluationDuration(apiMethod, type, code, quotaEvaluationDuration +
Number(process.hrtime.bigint() - quotaCleanUpStartTime));
return _done();
});
});
return done();
});
}, err => {
quotaEvaluationDuration = Number(process.hrtime.bigint() - requestStartTime);
if (err) {
log.warn('Error getting metrics from the quota service, allowing the request', {
error: err.name,
description: err.message,
});
}
if (!actionWithDataDeletion[apiMethod] &&
(bucketQuotaExceeded || accountQuotaExceeded)) {
return callback(errors.QuotaExceeded);
}
return callback();
});
}
module.exports = {
processBytesToWrite,
isMetricStale,
validateQuotas,
};
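
A minimal caller sketch for this newer signature, mirroring the objectPutPart
wiring further down; it assumes request.finalizerHooks exists so the helper
can register its inflight cleanup hook:

validateQuotas(request, bucketMD, request.accountQuotas, ['objectPut'],
    'objectPut', request.parsedContentLength, false, log, err => {
        if (err) {
            return callback(err); // errors.QuotaExceeded -> HTTP 429
        }
        return next(); // hypothetical continuation
    });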

View File

@ -5,8 +5,6 @@ const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const requestType = 'bucketDeleteQuota';
/**
* Bucket Update Quota - Update bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
@ -22,7 +20,7 @@ function bucketDeleteQuota(authInfo, request, log, callback) {
const metadataValParams = {
authInfo,
bucketName,
requestType: request.apiMethods || requestType,
requestType: request.apiMethods || 'bucketDeleteQuota',
request,
};
return waterfall([
@ -44,13 +42,14 @@ function bucketDeleteQuota(authInfo, request, log, callback) {
monitoring.promMetrics('DELETE', bucketName, err.code,
'bucketDeleteQuota');
return callback(err, err.code, corsHeaders);
} else {
monitoring.promMetrics(
'DELETE', bucketName, '204', 'bucketDeleteQuota');
pushMetric('bucketDeleteQuota', log, {
authInfo,
bucket: bucketName,
});
}
monitoring.promMetrics(
'DELETE', bucketName, '204', 'bucketDeleteQuota');
pushMetric('bucketDeleteQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, 204, corsHeaders);
});
}

View File

@ -21,6 +21,11 @@ function bucketGetQuota(authInfo, request, log, callback) {
request,
};
const xml = [];
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<GetBucketQuota>',
'<Name>', bucketName, '</Name>',
);
return standardMetadataValidateBucket(metadataValParams, request.actionImplicitDenies, log, (err, bucket) => {
const corsHeaders = collectCorsHeaders(headers.origin, method, bucket);
@ -31,11 +36,6 @@ function bucketGetQuota(authInfo, request, log, callback) {
});
return callback(err, null, corsHeaders);
}
xml.push(
'<?xml version="1.0" encoding="UTF-8"?>',
'<GetBucketQuota>',
'<Name>', bucket.getName(), '</Name>',
);
const bucketQuota = bucket.getQuota();
if (!bucketQuota) {
log.debug('bucket has no quota', {

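For reference, the full response assembled by this handler would look roughly
as follows, assuming the quota value is serialized in a Quota element after
Name (value illustrative):

<?xml version="1.0" encoding="UTF-8"?>
<GetBucketQuota>
<Name>my-bucket</Name>
<Quota>10000</Quota>
</GetBucketQuota>
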
View File

@ -45,8 +45,9 @@ function checkLocationConstraint(request, locationConstraint, log) {
} else if (parsedHost && restEndpoints[parsedHost]) {
locationConstraintChecked = restEndpoints[parsedHost];
} else {
locationConstraintChecked = Object.keys(locationConstraints)[0];
log.trace('no location constraint provided on bucket put; setting ' + locationConstraintChecked);
log.trace('no location constraint provided on bucket put;' +
'setting us-east-1');
locationConstraintChecked = 'us-east-1';
}
if (!locationConstraints[locationConstraintChecked]) {

View File

@ -5,37 +5,15 @@ const { standardMetadataValidateBucket } = require('../metadata/metadataUtils');
const metadata = require('../metadata/wrapper');
const { pushMetric } = require('../utapi/utilities');
const monitoring = require('../utilities/monitoringHandler');
const { parseString } = require('xml2js');
function validateBucketQuotaProperty(requestBody, next) {
const quota = requestBody.quota;
const quotaValue = parseInt(quota, 10);
if (Number.isNaN(quotaValue)) {
return next(errors.InvalidArgument.customizeDescription('Quota value should be a number'));
}
if (quotaValue <= 0) {
return next(errors.InvalidArgument.customizeDescription('Quota value must be a positive number'));
}
return next(null, quotaValue);
}
function parseRequestBody(requestBody, next) {
try {
const jsonData = JSON.parse(requestBody);
if (typeof jsonData !== 'object') {
throw new Error('Invalid JSON');
}
return next(null, jsonData);
} catch (jsonError) {
return parseString(requestBody, (xmlError, xmlData) => {
if (xmlError) {
return next(errors.InvalidArgument.customizeDescription('Request body must be a JSON object'));
}
return next(null, xmlData);
});
}
}
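
parseRequestBody falls back to XML when the payload is not valid JSON, so
either of these bodies (values illustrative) should reach
validateBucketQuotaProperty with a usable quota field:

// JSON body
{ "quota": 10000 }
// equivalent XML body: xml2js parses it to { quota: '10000' }
<quota>10000</quota>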
/**
* Bucket Update Quota - Update bucket quota
* @param {AuthInfo} authInfo - Instance of AuthInfo class with requester's info
* @param {object} request - http request object
* @param {object} log - Werelogs logger
* @param {function} callback - callback to server
* @return {undefined}
*/
function bucketUpdateQuota(authInfo, request, log, callback) {
log.debug('processing request', { method: 'bucketUpdateQuota' });
@ -53,11 +31,28 @@ function bucketUpdateQuota(authInfo, request, log, callback) {
bucket = b;
return next(err, bucket);
}),
(bucket, next) => parseRequestBody(request.post, (err, requestBody) => next(err, bucket, requestBody)),
(bucket, requestBody, next) => validateBucketQuotaProperty(requestBody, (err, quotaValue) =>
next(err, bucket, quotaValue)),
(bucket, quotaValue, next) => {
bucket.setQuota(quotaValue);
(bucket, next) => {
let requestBody;
try {
requestBody = JSON.parse(request.post);
} catch (parseError) {
return next(errors.InvalidArgument.customizeDescription('Invalid JSON format in request'));
}
if (typeof requestBody !== 'object' || Array.isArray(requestBody)) {
return next(errors.InvalidArgument.customizeDescription('Request body must be a JSON object'));
}
return next(null, bucket, requestBody);
},
(bucket, requestBody, next) => {
const quota = parseInt(requestBody.quota, 10);
if (Number.isNaN(quota)) {
return next(errors.InvalidArgument.customizeDescription('Quota value should be a number'));
}
if (quota <= 0) {
return next(errors.InvalidArgument.customizeDescription('Quota value must be a positive number'));
}
// Update the bucket quota
bucket.setQuota(quota);
return metadata.updateBucket(bucket.getName(), bucket, log, next);
},
], (err, bucket) => {
@ -71,13 +66,14 @@ function bucketUpdateQuota(authInfo, request, log, callback) {
monitoring.promMetrics('PUT', bucketName, err.code,
'updateBucketQuota');
return callback(err, err.code, corsHeaders);
} else {
monitoring.promMetrics(
'PUT', bucketName, '200', 'updateBucketQuota');
pushMetric('updateBucketQuota', log, {
authInfo,
bucket: bucketName,
});
}
monitoring.promMetrics(
'PUT', bucketName, '200', 'updateBucketQuota');
pushMetric('updateBucketQuota', log, {
authInfo,
bucket: bucketName,
});
return callback(null, corsHeaders);
});
}

View File

@ -6,7 +6,6 @@ const convertToXml = s3middleware.convertToXml;
const { pushMetric } = require('../utapi/utilities');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const { hasNonPrintables } = require('../utilities/stringChecks');
const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const constants = require('../../constants');
const services = require('../services');
@ -66,7 +65,7 @@ function initiateMultipartUpload(authInfo, request, log, callback) {
const websiteRedirectHeader =
request.headers['x-amz-website-redirect-location'];
if (request.headers['x-amz-storage-class'] &&
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', bucketName,
errors.InvalidStorageClass.code, 'initiateMultipartUpload');

View File

@ -11,7 +11,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const metadata = require('../metadata/wrapper');
const services = require('../services');
const vault = require('../auth/vault');
const { isBucketAuthorized, evaluateBucketPolicyWithIAM } =
const { isBucketAuthorized, evaluateBucketPolicyWithIAM, validateQuotas } =
require('./apiUtils/authorization/permissionChecks');
const { preprocessingVersioningDelete }
= require('./apiUtils/object/versioning');
@ -31,7 +31,6 @@ const { overheadField } = require('../../constants');
const versionIdUtils = versioning.VersionID;
const { data } = require('../data/wrapper');
const logger = require('../utilities/logger');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
/*
Format of xml request:
@ -333,9 +332,11 @@ function getObjMetadataAndDelete(authInfo, canonicalID, request,
return callback(null, objMD, versionId);
},
// TODO: handle inflights here
(objMD, versionId, callback) => validateQuotas(
request, bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
-objMD?.['content-length'] || 0, false, log, err => callback(err, objMD, versionId)),
bucket, request.accountQuotas, ['objectDelete'], 'objectDelete',
objMD?.['content-length'] || 0, log, err =>
callback(err, objMD, versionId)),
(objMD, versionId, callback) => {
const options = preprocessingVersioningDelete(
bucketName, bucket, objMD, versionId, config.nullVersionCompatMode);
@ -508,9 +509,8 @@ function multiObjectDelete(authInfo, request, log, callback) {
if (bucketShield(bucketMD, 'objectDelete')) {
return next(errors.NoSuchBucket);
}
// The implicit deny flag is ignored in the DeleteObjects API, as authorization only
// affects the objects.
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request)) {
if (!isBucketAuthorized(bucketMD, 'objectDelete', canonicalID, authInfo, log, request,
request.actionImplicitDenies)) {
log.trace("access denied due to bucket acl's");
// if access denied at the bucket level, no access for
// any of the objects so all results will be error results

View File

@ -220,14 +220,6 @@ function objectCopy(authInfo, request, sourceBucket,
versionId: sourceVersionId,
getDeleteMarker: true,
requestType: 'objectGet',
/**
* Authorization will first check the target object, with an objectPut
* action. But in this context, the source object metadata is still
* unknown. In the context of quotas, to know the number of bytes that
* are being written, we explicitly enable the quota evaluation logic
* during the objectGet action instead.
*/
checkQuota: true,
request,
};
const valPutParams = {
@ -235,7 +227,6 @@ function objectCopy(authInfo, request, sourceBucket,
bucketName: destBucketName,
objectKey: destObjectKey,
requestType: 'objectPut',
checkQuota: false,
request,
};
const dataStoreContext = {
@ -249,7 +240,7 @@ function objectCopy(authInfo, request, sourceBucket,
const responseHeaders = {};
if (request.headers['x-amz-storage-class'] &&
!config.locationConstraints[request.headers['x-amz-storage-class']]) {
!constants.validStorageClasses.includes(request.headers['x-amz-storage-class'])) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', destBucketName,
errors.InvalidStorageClass.code, 'copyObject');
@ -287,10 +278,7 @@ function objectCopy(authInfo, request, sourceBucket,
});
},
function checkSourceAuthorization(destBucketMD, destObjMD, next) {
return standardMetadataValidateBucketAndObj({
...valGetParams,
destObjMD,
}, request.actionImplicitDenies, log,
return standardMetadataValidateBucketAndObj(valGetParams, request.actionImplicitDenies, log,
(err, sourceBucketMD, sourceObjMD) => {
if (err) {
log.debug('error validating get part of request',

View File

@ -3,7 +3,6 @@ const { errors, versioning } = require('arsenal');
const constants = require('../../constants');
const aclUtils = require('../utilities/aclUtils');
const { config } = require('../Config');
const { cleanUpBucket } = require('./apiUtils/bucket/bucketCreation');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const collectCorsHeaders = require('../utilities/collectCorsHeaders');
@ -72,7 +71,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
query,
} = request;
if (headers['x-amz-storage-class'] &&
!config.locationConstraints[headers['x-amz-storage-class']]) {
!constants.validStorageClasses.includes(headers['x-amz-storage-class'])) {
log.trace('invalid storage-class header');
monitoring.promMetrics('PUT', request.bucketName,
errors.InvalidStorageClass.code, 'putObject');
@ -99,7 +98,7 @@ function objectPut(authInfo, request, streamingV4Params, log, callback) {
'The encryption method specified is not supported');
const requestType = request.apiMethods || 'objectPut';
const valParams = { authInfo, bucketName, objectKey, versionId,
requestType, request, withVersionId: isPutVersion };
requestType, request };
const canonicalID = authInfo.getCanonicalID();
if (hasNonPrintables(objectKey)) {

View File

@ -14,7 +14,7 @@ const setUpCopyLocator = require('./apiUtils/object/setUpCopyLocator');
const { standardMetadataValidateBucketAndObj } = require('../metadata/metadataUtils');
const monitoring = require('../utilities/monitoringHandler');
const { verifyColdObjectAvailable } = require('./apiUtils/object/coldStorage');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
const { validateQuotas } = require('./apiUtils/authorization/permissionChecks');
const versionIdUtils = versioning.VersionID;
@ -46,14 +46,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
versionId: reqVersionId,
getDeleteMarker: true,
requestType: 'objectGet',
/**
* Authorization will first check the target object, with an objectPut
* action. But in this context, the source object metadata is still
* unknown. In the context of quotas, to know the number of bytes that
* are being written, we explicitly enable the quota evaluation logic
* during the objectGet action instead.
*/
checkQuota: true,
request,
};
@ -77,7 +69,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
bucketName: destBucketName,
objectKey: destObjectKey,
requestType: 'objectPutPart',
checkQuota: false,
request,
};
@ -98,7 +89,6 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
objectKey: destObjectKey,
partNumber: paddedPartNumber,
uploadId,
enableQuota: true,
};
return async.waterfall([
@ -198,10 +188,11 @@ function objectPutCopyPart(authInfo, request, sourceBucket,
function _validateQuotas(dataLocator, destBucketMD,
copyObjectSize, sourceVerId,
sourceLocationConstraintName, sourceObjMD, next) {
return validateQuotas(request, destBucketMD, request.accountQuotas, valPutParams.requestType,
request.apiMethod, sourceObjMD?.['content-length'] || 0, false, log, err =>
next(err, dataLocator, destBucketMD, copyObjectSize, sourceVerId, sourceLocationConstraintName));
},
return validateQuotas(destBucketMD, request.accountQuotas, valPutParams.requestType, request.apiMethod,
sourceObjMD?.['content-length'] || 0, log, err => next(err, dataLocator, destBucketMD,
copyObjectSize, sourceVerId,
sourceLocationConstraintName));
},
// get MPU shadow bucket to get splitter based on MD version
function getMpuShadowBucket(dataLocator, destBucketMD,
copyObjectSize, sourceVerId,

View File

@ -6,7 +6,7 @@ const collectCorsHeaders = require('../utilities/collectCorsHeaders');
const constants = require('../../constants');
const { data } = require('../data/wrapper');
const { dataStore } = require('./apiUtils/object/storeObject');
const { isBucketAuthorized } =
const { isBucketAuthorized, validateQuotas } =
require('./apiUtils/authorization/permissionChecks');
const kms = require('../kms/wrapper');
const metadata = require('../metadata/wrapper');
@ -21,7 +21,6 @@ const { BackendInfo } = models;
const writeContinue = require('../utilities/writeContinue');
const { getObjectSSEConfiguration } = require('./apiUtils/bucket/bucketEncryption');
const validateChecksumHeaders = require('./apiUtils/object/validateChecksumHeaders');
const { validateQuotas } = require('./apiUtils/quotas/quotaUtils');
const skipError = new Error('skip');
@ -61,9 +60,6 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
log.debug('processing request', { method: 'objectPutPart' });
const size = request.parsedContentLength;
const putVersionId = request.headers['x-scal-s3-version-id'];
const isPutVersion = putVersionId || putVersionId === '';
if (Number.parseInt(size, 10) > constants.maximumAllowedPartSize) {
log.debug('put part size too large', { size });
monitoring.promMetrics('PUT', request.bucketName, 400,
@ -136,8 +132,9 @@ function objectPutPart(authInfo, request, streamingV4Params, log,
}
return next(null, destinationBucket);
},
(destinationBucket, next) => validateQuotas(request, destinationBucket, request.accountQuotas,
requestType, request.apiMethod, size, isPutVersion, log, err => next(err, destinationBucket)),
(destinationBucket, next) => validateQuotas(
destinationBucket, request.accountQuotas, requestType, request.apiMethod, size, log, err =>
next(err, destinationBucket)),
// Get bucket server-side encryption, if it exists.
(destinationBucket, next) => getObjectSSEConfiguration(
request.headers, destinationBucket, log,

View File

@ -1,3 +1,4 @@
const vaultclient = require('vaultclient');
const { auth } = require('arsenal');
const { config } = require('../Config');
@ -20,7 +21,6 @@ function getVaultClient(config) {
port,
https: true,
});
const vaultclient = require('vaultclient');
vaultClient = new vaultclient.Client(host, port, true, key, cert, ca);
} else {
logger.info('vaultclient configuration', {
@ -28,7 +28,6 @@ function getVaultClient(config) {
port,
https: false,
});
const vaultclient = require('vaultclient');
vaultClient = new vaultclient.Client(host, port);
}
@ -50,6 +49,10 @@ function getMemBackend(config) {
}
switch (config.backends.auth) {
case 'mem':
implName = 'vaultMem';
client = getMemBackend(config);
break;
case 'multiple':
implName = 'vaultChain';
client = new ChainBackend('s3', [
@ -57,14 +60,9 @@ case 'multiple':
getVaultClient(config),
]);
break;
case 'vault':
default: // vault
implName = 'vault';
client = getVaultClient(config);
break;
default: // mem
implName = 'vaultMem';
client = getMemBackend(config);
break;
}
module.exports = new Vault(client, implName);

View File

@ -8,6 +8,20 @@ const inMemory = require('./in_memory/backend').backend;
const file = require('./file/backend');
const KMIPClient = require('arsenal').network.kmipClient;
const Common = require('./common');
let scalityKMS;
let scalityKMSImpl;
try {
// eslint-disable-next-line import/no-unresolved
const ScalityKMS = require('scality-kms');
scalityKMS = new ScalityKMS(config.kms);
scalityKMSImpl = 'scalityKms';
} catch (error) {
logger.warn('scality kms unavailable. ' +
'Using file kms backend unless mem specified.',
{ error });
scalityKMS = file;
scalityKMSImpl = 'fileKms';
}
let client;
let implName;
@ -19,9 +33,8 @@ if (config.backends.kms === 'mem') {
client = file;
implName = 'fileKms';
} else if (config.backends.kms === 'scality') {
const ScalityKMS = require('scality-kms');
client = new ScalityKMS(config.kms);
implName = 'scalityKms';
client = scalityKMS;
implName = scalityKMSImpl;
} else if (config.backends.kms === 'kmip') {
const kmipConfig = { kmip: config.kmip };
if (!kmipConfig.kmip) {

View File

@ -0,0 +1,131 @@
/**
 * Type of message exchanged on the secure channel
* @readonly
* @enum {number}
*/
const MessageType = {
/** Message that contains a configuration overlay */
CONFIG_OVERLAY_MESSAGE: 1,
/** Message that requests a metrics report */
METRICS_REQUEST_MESSAGE: 2,
/** Message that contains a metrics report */
METRICS_REPORT_MESSAGE: 3,
/** Close the virtual TCP socket associated to the channel */
CHANNEL_CLOSE_MESSAGE: 4,
/** Write data to the virtual TCP socket associated to the channel */
CHANNEL_PAYLOAD_MESSAGE: 5,
};
/**
* Target service that should handle a message
* @readonly
* @enum {number}
*/
const TargetType = {
/** Let the dispatcher choose the most appropriate message */
TARGET_ANY: 0,
};
const headerSize = 3;
class ChannelMessageV0 {
/**
* @param {Buffer} buffer Message bytes
*/
constructor(buffer) {
this.messageType = buffer.readUInt8(0);
this.channelNumber = buffer.readUInt8(1);
this.target = buffer.readUInt8(2);
this.payload = buffer.slice(headerSize);
}
/**
* @returns {number} Message type
*/
getType() {
return this.messageType;
}
/**
* @returns {number} Channel number if applicable
*/
getChannelNumber() {
return this.channelNumber;
}
/**
* @returns {number} Target service, or 0 to choose automatically
*/
getTarget() {
return this.target;
}
/**
* @returns {Buffer} Message payload if applicable
*/
getPayload() {
return this.payload;
}
/**
* Creates a wire representation of a channel close message
*
* @param {number} channelId Channel number
*
* @returns {Buffer} wire representation
*/
static encodeChannelCloseMessage(channelId) {
const buf = Buffer.alloc(headerSize);
buf.writeUInt8(MessageType.CHANNEL_CLOSE_MESSAGE, 0);
buf.writeUInt8(channelId, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
return buf;
}
/**
* Creates a wire representation of a channel data message
*
* @param {number} channelId Channel number
* @param {Buffer} data Payload
*
* @returns {Buffer} wire representation
*/
static encodeChannelDataMessage(channelId, data) {
const buf = Buffer.alloc(data.length + headerSize);
buf.writeUInt8(MessageType.CHANNEL_PAYLOAD_MESSAGE, 0);
buf.writeUInt8(channelId, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
data.copy(buf, headerSize);
return buf;
}
/**
* Creates a wire representation of a metrics message
*
* @param {object} body Metrics report
*
* @returns {Buffer} wire representation
*/
static encodeMetricsReportMessage(body) {
const report = JSON.stringify(body);
const buf = Buffer.alloc(report.length + headerSize);
buf.writeUInt8(MessageType.METRICS_REPORT_MESSAGE, 0);
buf.writeUInt8(0, 1);
buf.writeUInt8(TargetType.TARGET_ANY, 2);
buf.write(report, headerSize);
return buf;
}
/**
 * Protocol name used for subprotocol negotiation
*/
static get protocolName() {
return 'zenko-secure-channel-v0';
}
}
module.exports = {
ChannelMessageV0,
MessageType,
TargetType,
};
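
A round-trip sketch of the wire format above: encode a payload message for
channel 3, then decode it back (values illustrative):

const wire = ChannelMessageV0.encodeChannelDataMessage(3, Buffer.from('hello'));
const msg = new ChannelMessageV0(wire);
// msg.getType() === MessageType.CHANNEL_PAYLOAD_MESSAGE
// msg.getChannelNumber() === 3
// msg.getTarget() === TargetType.TARGET_ANY
// msg.getPayload().toString() === 'hello'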

View File

@ -0,0 +1,94 @@
const WebSocket = require('ws');
const arsenal = require('arsenal');
const logger = require('../utilities/logger');
const _config = require('../Config').config;
const { patchConfiguration } = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;
const managementAgentMessageType = {
/** Message that contains the loaded overlay */
NEW_OVERLAY: 1,
};
const CONNECTION_RETRY_TIMEOUT_MS = 5000;
function initManagementClient() {
const { host, port } = _config.managementAgent;
const ws = new WebSocket(`ws://${host}:${port}/watch`);
ws.on('open', () => {
logger.info('connected with management agent');
});
ws.on('close', (code, reason) => {
logger.info('disconnected from management agent', { reason });
setTimeout(initManagementClient, CONNECTION_RETRY_TIMEOUT_MS);
});
ws.on('error', error => {
logger.error('error on connection with management agent', { error });
});
ws.on('message', data => {
const method = 'initManagementClient::onMessage';
const log = logger.newRequestLogger();
let msg;
if (!data) {
log.error('message without data', { method });
return;
}
try {
msg = JSON.parse(data);
} catch (err) {
log.error('message data is not valid JSON', { method, err, data });
return;
}
if (msg.payload === undefined) {
log.error('message without payload', { method });
return;
}
if (typeof msg.messageType !== 'number') {
log.error('messageType is not an integer', {
type: typeof msg.messageType,
method,
});
return;
}
switch (msg.messageType) {
case managementAgentMessageType.NEW_OVERLAY:
patchConfiguration(msg.payload, log, err => {
if (err) {
log.error('failed to patch overlay', {
error: reshapeExceptionError(err),
method,
});
}
});
return;
default:
log.error('new overlay message with unmanaged message type', {
method,
type: msg.messageType,
});
return;
}
});
}
function isManagementAgentUsed() {
return process.env.MANAGEMENT_USE_AGENT === '1';
}
module.exports = {
managementAgentMessageType,
initManagementClient,
isManagementAgentUsed,
};

View File

@ -0,0 +1,240 @@
const arsenal = require('arsenal');
const { buildAuthDataAccount } = require('../auth/in_memory/builder');
const _config = require('../Config').config;
const metadata = require('../metadata/wrapper');
const { getStoredCredentials } = require('./credentials');
const latestOverlayVersionKey = 'configuration/overlay-version';
const managementDatabaseName = 'PENSIEVE';
const replicatorEndpoint = 'zenko-cloudserver-replicator';
const { decryptSecret } = arsenal.pensieve.credentialUtils;
const { patchLocations } = arsenal.patches.locationConstraints;
const { reshapeExceptionError } = arsenal.errorUtils;
const { replicationBackends } = require('arsenal').constants;
function overlayHasVersion(overlay) {
return overlay && overlay.version !== undefined;
}
function remoteOverlayIsNewer(cachedOverlay, remoteOverlay) {
return (overlayHasVersion(remoteOverlay) &&
(!overlayHasVersion(cachedOverlay) ||
remoteOverlay.version > cachedOverlay.version));
}
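
Illustrative version comparisons for this predicate:

remoteOverlayIsNewer({ version: 2 }, { version: 3 }); // true: remote is ahead
remoteOverlayIsNewer({ version: 3 }, { version: 3 }); // false: same version
remoteOverlayIsNewer({ version: 3 }, {});             // false: remote has no version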
/**
* Updates the live {Config} object with the new overlay configuration.
*
* No-op if this version was already applied to the live {Config}.
*
* @param {object} newConf Overlay configuration to apply
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} cb Function to call with (error, newConf)
*
* @returns {undefined}
*/
function patchConfiguration(newConf, log, cb) {
if (newConf.version === undefined) {
log.debug('no remote configuration created yet');
return process.nextTick(cb, null, newConf);
}
if (_config.overlayVersion !== undefined &&
newConf.version <= _config.overlayVersion) {
log.debug('configuration version already applied',
{ configurationVersion: newConf.version });
return process.nextTick(cb, null, newConf);
}
return getStoredCredentials(log, (err, creds) => {
if (err) {
return cb(err);
}
const accounts = [];
if (newConf.users) {
newConf.users.forEach(u => {
if (u.secretKey && u.secretKey.length > 0) {
const secretKey = decryptSecret(creds, u.secretKey);
// accountType will be service-replication or service-clueso
let serviceName;
if (u.accountType && u.accountType.startsWith('service-')) {
serviceName = u.accountType.split('-')[1];
}
const newAccount = buildAuthDataAccount(
u.accessKey, secretKey, u.canonicalId, serviceName,
u.userName);
accounts.push(newAccount.accounts[0]);
}
});
}
const restEndpoints = Object.assign({}, _config.restEndpoints);
if (newConf.endpoints) {
newConf.endpoints.forEach(e => {
restEndpoints[e.hostname] = e.locationName;
});
}
if (!restEndpoints[replicatorEndpoint]) {
restEndpoints[replicatorEndpoint] = 'us-east-1';
}
const locations = patchLocations(newConf.locations, creds, log);
if (Object.keys(locations).length !== 0) {
try {
_config.setLocationConstraints(locations);
} catch (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not apply configuration version location ' +
'constraints', { error: exceptionError,
method: 'getStoredCredentials' });
return cb(exceptionError);
}
try {
const locationsWithReplicationBackend = Object.keys(locations)
// NOTE: In Orbit, we don't need to have Scality location in our
// replication endpoint config, since we do not replicate to
// any Scality Instance yet.
.filter(key => replicationBackends
[locations[key].type])
.reduce((obj, key) => {
/* eslint no-param-reassign:0 */
obj[key] = locations[key];
return obj;
}, {});
_config.setReplicationEndpoints(
locationsWithReplicationBackend);
} catch (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not apply replication endpoints',
{ error: exceptionError, method: 'getStoredCredentials' });
return cb(exceptionError);
}
}
_config.setAuthDataAccounts(accounts);
_config.setRestEndpoints(restEndpoints);
_config.setPublicInstanceId(newConf.instanceId);
if (newConf.browserAccess) {
if (Boolean(_config.browserAccessEnabled) !==
Boolean(newConf.browserAccess.enabled)) {
_config.browserAccessEnabled =
Boolean(newConf.browserAccess.enabled);
_config.emit('browser-access-enabled-change');
}
}
_config.overlayVersion = newConf.version;
log.info('applied configuration version',
{ configurationVersion: _config.overlayVersion });
return cb(null, newConf);
});
}
/**
* Writes configuration version to the management database
*
* @param {object} cachedOverlay Latest stored configuration version
* for freshness comparison purposes
* @param {object} remoteOverlay New configuration version
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} cb Function to call with (error, remoteOverlay)
*
* @returns {undefined}
*/
function saveConfigurationVersion(cachedOverlay, remoteOverlay, log, cb) {
if (remoteOverlayIsNewer(cachedOverlay, remoteOverlay)) {
const objName = `configuration/overlay/${remoteOverlay.version}`;
metadata.putObjectMD(managementDatabaseName, objName, remoteOverlay,
{}, log, error => {
if (error) {
const exceptionError = reshapeExceptionError(error);
log.error('could not save configuration',
{ error: exceptionError,
method: 'saveConfigurationVersion',
configurationVersion: remoteOverlay.version });
cb(exceptionError);
return;
}
metadata.putObjectMD(managementDatabaseName,
latestOverlayVersionKey, remoteOverlay.version, {}, log,
error => {
if (error) {
log.error('could not save configuration version', {
configurationVersion: remoteOverlay.version,
});
}
cb(error, remoteOverlay);
});
});
} else {
log.debug('no remote configuration to cache yet');
process.nextTick(cb, null, remoteOverlay);
}
}
/**
* Loads the latest cached configuration overlay from the management
* database, without contacting the Orbit API.
*
* @param {werelogs~Logger} log Request-scoped logger
* @param {function} callback Function called with (error, cachedOverlay)
*
* @returns {undefined}
*/
function loadCachedOverlay(log, callback) {
return metadata.getObjectMD(managementDatabaseName,
latestOverlayVersionKey, {}, log, (err, version) => {
if (err) {
if (err.is.NoSuchKey) {
return process.nextTick(callback, null, {});
}
return callback(err);
}
return metadata.getObjectMD(managementDatabaseName,
`configuration/overlay/${version}`, {}, log, (err, conf) => {
if (err) {
if (err.is.NoSuchKey) {
return process.nextTick(callback, null, {});
}
return callback(err);
}
return callback(null, conf);
});
});
}
function applyAndSaveOverlay(overlay, log) {
patchConfiguration(overlay, log, err => {
if (err) {
log.error('could not apply pushed overlay', {
error: reshapeExceptionError(err),
method: 'applyAndSaveOverlay',
});
return;
}
saveConfigurationVersion(null, overlay, log, err => {
if (err) {
log.error('could not cache overlay version', {
error: reshapeExceptionError(err),
method: 'applyAndSaveOverlay',
});
return;
}
log.info('overlay push processed');
});
});
}
module.exports = {
loadCachedOverlay,
managementDatabaseName,
patchConfiguration,
saveConfigurationVersion,
remoteOverlayIsNewer,
applyAndSaveOverlay,
};

View File

@ -0,0 +1,145 @@
const arsenal = require('arsenal');
const forge = require('node-forge');
const request = require('../utilities/request');
const metadata = require('../metadata/wrapper');
const managementDatabaseName = 'PENSIEVE';
const tokenConfigurationKey = 'auth/zenko/remote-management-token';
const tokenRotationDelay = 3600 * 24 * 7 * 1000; // 7 days
const { reshapeExceptionError } = arsenal.errorUtils;
/**
* Retrieves Orbit API token from the management database.
*
* The token is used to authenticate stat posting and
*
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function called with (error, result)
*
* @returns {undefined}
*/
function getStoredCredentials(log, callback) {
metadata.getObjectMD(managementDatabaseName, tokenConfigurationKey, {},
log, callback);
}
function issueCredentials(managementEndpoint, instanceId, log, callback) {
log.info('registering with API to get token');
const keyPair = forge.pki.rsa.generateKeyPair({ bits: 2048, e: 0x10001 });
const privateKey = forge.pki.privateKeyToPem(keyPair.privateKey);
const publicKey = forge.pki.publicKeyToPem(keyPair.publicKey);
const postData = {
publicKey,
};
request.post(`${managementEndpoint}/${instanceId}/register`,
{ body: postData, json: true }, (error, response, body) => {
if (error) {
return callback(error);
}
if (response.statusCode !== 201) {
log.error('could not register instance', {
statusCode: response.statusCode,
});
return callback(arsenal.errors.InternalError);
}
/* eslint-disable no-param-reassign */
body.privateKey = privateKey;
/* eslint-enable no-param-reassign */
return callback(null, body);
});
}
function confirmInstanceCredentials(
managementEndpoint, instanceId, creds, log, callback) {
const postData = {
serial: creds.serial || 0,
publicKey: creds.publicKey,
};
const opts = {
headers: {
'x-instance-authentication-token': creds.token,
},
body: postData,
};
request.post(`${managementEndpoint}/${instanceId}/confirm`,
opts, (error, response) => {
if (error) {
return callback(error);
}
if (response.statusCode === 200) {
return callback(null, instanceId, creds.token);
}
return callback(arsenal.errors.InternalError);
});
}
/**
* Initializes credentials and PKI in the management database.
*
* In case the management database is new and empty, the instance
* is registered as new against the Orbit API with newly-generated
* RSA key pair.
*
* @param {string} managementEndpoint API endpoint
* @param {string} instanceId UUID of this deployment
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function called with (error, result)
*
* @returns {undefined}
*/
function initManagementCredentials(
managementEndpoint, instanceId, log, callback) {
getStoredCredentials(log, (error, value) => {
if (error) {
if (error.is.NoSuchKey) {
return issueCredentials(managementEndpoint, instanceId, log,
(error, value) => {
if (error) {
log.error('could not issue token',
{ error: reshapeExceptionError(error),
method: 'initManagementCredentials' });
return callback(error);
}
log.debug('saving token');
return metadata.putObjectMD(managementDatabaseName,
tokenConfigurationKey, value, {}, log, error => {
if (error) {
log.error('could not save token',
{ error: reshapeExceptionError(error),
method: 'initManagementCredentials',
});
return callback(error);
}
log.info('saved token locally, ' +
'confirming instance');
return confirmInstanceCredentials(
managementEndpoint, instanceId, value, log,
callback);
});
});
}
log.debug('could not get token', { error });
return callback(error);
}
log.info('returning existing token');
if (Date.now() - value.issueDate > tokenRotationDelay) {
log.warn('management API token is too old, should re-issue');
}
return callback(null, instanceId, value.token);
});
}
module.exports = {
getStoredCredentials,
initManagementCredentials,
};

138
lib/management/index.js Normal file
View File

@ -0,0 +1,138 @@
const arsenal = require('arsenal');
const async = require('async');
const metadata = require('../metadata/wrapper');
const logger = require('../utilities/logger');
const {
loadCachedOverlay,
managementDatabaseName,
patchConfiguration,
} = require('./configuration');
const { initManagementCredentials } = require('./credentials');
const { startWSManagementClient } = require('./push');
const { startPollingManagementClient } = require('./poll');
const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');
const initRemoteManagementRetryDelay = 10000;
const managementEndpointRoot =
process.env.MANAGEMENT_ENDPOINT ||
'https://api.zenko.io';
const managementEndpoint = `${managementEndpointRoot}/api/v1/instance`;
const pushEndpointRoot =
process.env.PUSH_ENDPOINT ||
'https://push.api.zenko.io';
const pushEndpoint = `${pushEndpointRoot}/api/v1/instance`;
function initManagementDatabase(log, callback) {
// XXX choose proper owner names
const md = new arsenal.models.BucketInfo(managementDatabaseName, 'owner',
'owner display name', new Date().toJSON());
metadata.createBucket(managementDatabaseName, md, log, error => {
if (error) {
if (error.is.BucketAlreadyExists) {
log.info('management database already exists');
return callback();
}
log.error('could not initialize management database',
{ error: reshapeExceptionError(error),
method: 'initManagementDatabase' });
return callback(error);
}
log.info('initialized management database');
return callback();
});
}
function startManagementListeners(instanceId, token) {
const mode = process.env.MANAGEMENT_MODE || 'push';
if (mode === 'push') {
const url = `${pushEndpoint}/${instanceId}/ws`;
startWSManagementClient(url, token);
} else {
startPollingManagementClient(managementEndpoint, instanceId, token);
}
}
/**
* Initializes Orbit-based management by:
* - creating the management database in metadata
* - generating a key pair for credentials encryption
* - generating an instance-unique ID
* - getting an authentication token for the API
* - loading and applying the latest cached overlay configuration
* - starting a configuration update and metrics push background task
*
* @param {werelogs~Logger} log Request-scoped logger to be able to trace
* initialization process
* @param {function} callback Function to call once the overlay is loaded
* (overlay)
*
* @returns {undefined}
*/
function initManagement(log, callback) {
if ((process.env.REMOTE_MANAGEMENT_DISABLE &&
process.env.REMOTE_MANAGEMENT_DISABLE !== '0')
|| process.env.S3BACKEND === 'mem') {
log.info('remote management disabled');
return;
}
/* Temporary check before to fully move to the process management agent. */
if (isManagementAgentUsed() ^ typeof callback === 'function') {
let msg = 'misuse of initManagement function: ';
msg += `MANAGEMENT_USE_AGENT: ${process.env.MANAGEMENT_USE_AGENT}`;
msg += `, callback type: ${typeof callback}`;
throw new Error(msg);
}
async.waterfall([
// eslint-disable-next-line arrow-body-style
cb => { return isManagementAgentUsed() ? metadata.setup(cb) : cb(); },
cb => initManagementDatabase(log, cb),
cb => metadata.getUUID(log, cb),
(instanceId, cb) => initManagementCredentials(
managementEndpoint, instanceId, log, cb),
(instanceId, token, cb) => {
if (!isManagementAgentUsed()) {
cb(null, instanceId, token, {});
return;
}
loadCachedOverlay(log, (err, overlay) => cb(err, instanceId,
token, overlay));
},
(instanceId, token, overlay, cb) => {
if (!isManagementAgentUsed()) {
cb(null, instanceId, token, overlay);
return;
}
patchConfiguration(overlay, log,
err => cb(err, instanceId, token, overlay));
},
], (error, instanceId, token, overlay) => {
if (error) {
log.error('could not initialize remote management, retrying later',
{ error: reshapeExceptionError(error),
method: 'initManagement' });
setTimeout(initManagement,
initRemoteManagementRetryDelay,
logger.newRequestLogger());
} else {
log.info(`this deployment's Instance ID is ${instanceId}`);
log.end('management init done');
startManagementListeners(instanceId, token);
if (callback) {
callback(overlay);
}
}
});
}
module.exports = {
initManagement,
initManagementDatabase,
};

157
lib/management/poll.js Normal file
View File

@ -0,0 +1,157 @@
const arsenal = require('arsenal');
const async = require('async');
const request = require('../utilities/request');
const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');
const {
loadCachedOverlay,
patchConfiguration,
saveConfigurationVersion,
} = require('./configuration');
const { reshapeExceptionError } = arsenal.errorUtils;
const pushReportDelay = 30000;
const pullConfigurationOverlayDelay = 60000;
function loadRemoteOverlay(
managementEndpoint, instanceId, remoteToken, cachedOverlay, log, cb) {
log.debug('loading remote overlay');
const opts = {
headers: {
'x-instance-authentication-token': remoteToken,
'x-scal-request-id': log.getSerializedUids(),
},
json: true,
};
request.get(`${managementEndpoint}/${instanceId}/config/overlay`, opts,
(error, response, body) => {
if (error) {
return cb(error);
}
if (response.statusCode === 200) {
return cb(null, cachedOverlay, body);
}
if (response.statusCode === 404) {
return cb(null, cachedOverlay, {});
}
return cb(arsenal.errors.AccessForbidden, cachedOverlay, {});
});
}
// TODO save only after successful patch
function applyConfigurationOverlay(
managementEndpoint, instanceId, remoteToken, log) {
async.waterfall([
wcb => loadCachedOverlay(log, wcb),
(cachedOverlay, wcb) => patchConfiguration(cachedOverlay,
log, wcb),
(cachedOverlay, wcb) =>
loadRemoteOverlay(managementEndpoint, instanceId, remoteToken,
cachedOverlay, log, wcb),
(cachedOverlay, remoteOverlay, wcb) =>
saveConfigurationVersion(cachedOverlay, remoteOverlay, log, wcb),
(remoteOverlay, wcb) => patchConfiguration(remoteOverlay,
log, wcb),
], error => {
if (error) {
log.error('could not apply managed configuration',
{ error: reshapeExceptionError(error),
method: 'applyConfigurationOverlay' });
}
setTimeout(applyConfigurationOverlay, pullConfigurationOverlayDelay,
managementEndpoint, instanceId, remoteToken,
logger.newRequestLogger());
});
}
function postStats(managementEndpoint, instanceId, remoteToken, report, next) {
const toURL = `${managementEndpoint}/${instanceId}/stats`;
const toOptions = {
json: true,
headers: {
'content-type': 'application/json',
'x-instance-authentication-token': remoteToken,
},
body: report,
};
const toCallback = (err, response, body) => {
if (err) {
logger.info('could not post stats', { error: err });
}
if (response && response.statusCode !== 201) {
logger.info('could not post stats', {
body,
statusCode: response.statusCode,
});
}
if (next) {
next(null, instanceId, remoteToken);
}
};
return request.post(toURL, toOptions, toCallback);
}
function getStats(next) {
const fromURL = `http://localhost:${_config.port}/_/report`;
const fromOptions = {
headers: {
'x-scal-report-token': process.env.REPORT_TOKEN,
},
};
return request.get(fromURL, fromOptions, next);
}
function pushStats(managementEndpoint, instanceId, remoteToken, next) {
if (process.env.PUSH_STATS === 'false') {
return;
}
getStats((err, res, report) => {
if (err) {
logger.info('could not retrieve stats', { error: err });
return;
}
logger.debug('report', { report });
postStats(
managementEndpoint,
instanceId,
remoteToken,
report,
next
);
return;
});
setTimeout(pushStats, pushReportDelay,
managementEndpoint, instanceId, remoteToken);
}
/**
* Starts background task that updates configuration and pushes stats.
*
* Periodically polls for configuration updates, and pushes stats at
* a fixed interval.
*
* @param {string} managementEndpoint API endpoint
* @param {string} instanceId UUID of this deployment
* @param {string} remoteToken API authentication token
*
* @returns {undefined}
*/
function startPollingManagementClient(
managementEndpoint, instanceId, remoteToken) {
metadata.notifyBucketChange(() => {
pushStats(managementEndpoint, instanceId, remoteToken);
});
pushStats(managementEndpoint, instanceId, remoteToken);
applyConfigurationOverlay(managementEndpoint, instanceId, remoteToken,
logger.newRequestLogger());
}
module.exports = {
startPollingManagementClient,
};

301
lib/management/push.js Normal file
View File

@ -0,0 +1,301 @@
const arsenal = require('arsenal');
const HttpsProxyAgent = require('https-proxy-agent');
const net = require('net');
const request = require('../utilities/request');
const { URL } = require('url');
const WebSocket = require('ws');
const assert = require('assert');
const http = require('http');
const _config = require('../Config').config;
const logger = require('../utilities/logger');
const metadata = require('../metadata/wrapper');
const { reshapeExceptionError } = arsenal.errorUtils;
const { isManagementAgentUsed } = require('./agentClient');
const { applyAndSaveOverlay } = require('./configuration');
const {
ChannelMessageV0,
MessageType,
} = require('./ChannelMessageV0');
const {
CONFIG_OVERLAY_MESSAGE,
METRICS_REQUEST_MESSAGE,
CHANNEL_CLOSE_MESSAGE,
CHANNEL_PAYLOAD_MESSAGE,
} = MessageType;
const PING_INTERVAL_MS = 10000;
const subprotocols = [ChannelMessageV0.protocolName];
const cloudServerHost = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_HOST
|| 'localhost';
const cloudServerPort = process.env.SECURE_CHANNEL_DEFAULT_FORWARD_TO_PORT
|| _config.port;
let overlayMessageListener = null;
let connected = false;
// No wildcard nor cidr/mask match for now
function createWSAgent(pushEndpoint, env, log) {
const url = new URL(pushEndpoint);
const noProxy = (env.NO_PROXY || env.no_proxy
|| '').split(',');
if (noProxy.includes(url.hostname)) {
log.info('push server ws has proxy exclusion', { noProxy });
return null;
}
if (url.protocol === 'https:' || url.protocol === 'wss:') {
const httpsProxy = (env.HTTPS_PROXY || env.https_proxy);
if (httpsProxy) {
log.info('push server ws using https proxy', { httpsProxy });
return new HttpsProxyAgent(httpsProxy);
}
} else if (url.protocol === 'http:' || url.protocol === 'ws:') {
const httpProxy = (env.HTTP_PROXY || env.http_proxy);
if (httpProxy) {
log.info('push server ws using http proxy', { httpProxy });
return new HttpsProxyAgent(httpProxy);
}
}
const allProxy = (env.ALL_PROXY || env.all_proxy);
if (allProxy) {
log.info('push server ws using wildcard proxy', { allProxy });
return new HttpsProxyAgent(allProxy);
}
log.info('push server ws not using proxy');
return null;
}
/**
* Starts background task that updates configuration and pushes stats.
*
* Receives pushed Websocket messages on configuration updates, and
* sends stat messages in response to API solicitations.
*
* @param {string} url API endpoint
* @param {string} token API authentication token
* @param {function} cb end-of-connection callback
*
* @returns {undefined}
*/
function startWSManagementClient(url, token, cb) {
logger.info('connecting to push server', { url });
function _logError(error, errorMessage, method) {
if (error) {
logger.error(`management client error: ${errorMessage}`,
{ error: reshapeExceptionError(error), method });
}
}
const socketsByChannelId = [];
const headers = {
'x-instance-authentication-token': token,
};
const agent = createWSAgent(url, process.env, logger);
const ws = new WebSocket(url, subprotocols, { headers, agent });
let pingTimeout = null;
function sendPing() {
if (ws.readyState === ws.OPEN) {
ws.ping(err => _logError(err, 'failed to send a ping', 'sendPing'));
}
pingTimeout = setTimeout(() => ws.terminate(), PING_INTERVAL_MS);
}
function initiatePing() {
clearTimeout(pingTimeout);
setTimeout(sendPing, PING_INTERVAL_MS);
}
function pushStats(options) {
if (process.env.PUSH_STATS === 'false') {
return;
}
const fromURL = `http://${cloudServerHost}:${cloudServerPort}/_/report`;
const fromOptions = {
json: true,
headers: {
'x-scal-report-token': process.env.REPORT_TOKEN,
'x-scal-report-skip-cache': Boolean(options && options.noCache),
},
};
request.get(fromURL, fromOptions, (err, response, body) => {
if (err) {
_logError(err, 'failed to get metrics report', 'pushStats');
return;
}
ws.send(ChannelMessageV0.encodeMetricsReportMessage(body),
err => _logError(err, 'failed to send metrics report message',
'pushStats'));
});
}
function closeChannel(channelId) {
const socket = socketsByChannelId[channelId];
if (socket) {
socket.destroy();
delete socketsByChannelId[channelId];
}
}
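// receiveChannelData below implements a small reverse tunnel: each channelId
// maps to one local TCP connection to cloudserver; payloads received over the
// websocket are written to that socket, and whatever the socket emits is sent
// back as channel-data messages, with socket end propagated as channel-close.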
function receiveChannelData(channelId, payload) {
let socket = socketsByChannelId[channelId];
if (!socket) {
socket = net.createConnection(cloudServerPort, cloudServerHost);
socket.on('data', data => {
ws.send(ChannelMessageV0.
encodeChannelDataMessage(channelId, data), err =>
_logError(err, 'failed to send channel data message',
'receiveChannelData'));
});
socket.on('connect', () => {
});
socket.on('drain', () => {
});
socket.on('error', error => {
logger.error('failed to connect to S3', {
code: error.code,
host: error.address,
port: error.port,
});
});
socket.on('end', () => {
socket.destroy();
socketsByChannelId[channelId] = null;
ws.send(ChannelMessageV0.encodeChannelCloseMessage(channelId),
err => _logError(err,
'failed to send channel close message',
'receiveChannelData'));
});
socketsByChannelId[channelId] = socket;
}
socket.write(payload);
}
function browserAccessChangeHandler() {
if (!_config.browserAccessEnabled) {
socketsByChannelId.forEach(s => s && s.destroy());
}
}
ws.on('open', () => {
connected = true;
logger.info('connected to push server');
metadata.notifyBucketChange(() => {
pushStats({ noCache: true });
});
_config.on('browser-access-enabled-change', browserAccessChangeHandler);
initiatePing();
});
const cbOnce = cb ? arsenal.jsutil.once(cb) : null;
ws.on('close', () => {
logger.info('disconnected from push server, reconnecting in 10s');
metadata.notifyBucketChange(null);
_config.removeListener('browser-access-enabled-change',
browserAccessChangeHandler);
setTimeout(startWSManagementClient, 10000, url, token);
connected = false;
if (cbOnce) {
process.nextTick(cbOnce);
}
});
ws.on('error', err => {
connected = false;
logger.error('error from push server connection', {
error: err,
errorMessage: err.message,
});
if (cbOnce) {
process.nextTick(cbOnce, err);
}
});
ws.on('ping', () => {
ws.pong(err => _logError(err, 'failed to send a pong', 'ping'));
});
ws.on('pong', () => {
initiatePing();
});
ws.on('message', data => {
const log = logger.newRequestLogger();
const message = new ChannelMessageV0(data);
switch (message.getType()) {
case CONFIG_OVERLAY_MESSAGE:
if (!isManagementAgentUsed()) {
applyAndSaveOverlay(JSON.parse(message.getPayload()), log);
} else {
if (overlayMessageListener) {
overlayMessageListener(message.getPayload().toString());
}
}
break;
case METRICS_REQUEST_MESSAGE:
pushStats();
break;
case CHANNEL_CLOSE_MESSAGE:
closeChannel(message.getChannelNumber());
break;
case CHANNEL_PAYLOAD_MESSAGE:
// browserAccessEnabled defaults to true unless explicitly false
if (_config.browserAccessEnabled !== false) {
receiveChannelData(
message.getChannelNumber(), message.getPayload());
}
break;
default:
logger.error('unknown message type from push server',
{ messageType: message.getType() });
}
});
}
function addOverlayMessageListener(callback) {
assert(typeof callback === 'function');
overlayMessageListener = callback;
}
function startPushConnectionHealthCheckServer(cb) {
const server = http.createServer((req, res) => {
if (req.url !== '/_/healthcheck') {
res.writeHead(404);
res.write('Not Found');
} else if (connected) {
res.writeHead(200);
res.write('Connected');
} else {
res.writeHead(503);
res.write('Not Connected');
}
res.end();
});
server.listen(_config.port, cb);
}
module.exports = {
createWSAgent,
startWSManagementClient,
startPushConnectionHealthCheckServer,
addOverlayMessageListener,
};
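A usage sketch of the exports above, with an illustrative push server URL and token (real values come from deployment configuration); note that startWSManagementClient reconnects on its own after a close:

const {
    startWSManagementClient,
    startPushConnectionHealthCheckServer,
} = require('./lib/management/push');

// Expose /_/healthcheck (bound to _config.port) so orchestration can probe
// the websocket connection state.
startPushConnectionHealthCheckServer(() => {
    startWSManagementClient('wss://push.example.com', 'instance-token',
        err => {
            // Invoked once per lost connection; reconnection is automatic.
        });
});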

View File

@ -3,12 +3,10 @@ const { errors } = require('arsenal');
const metadata = require('./wrapper');
const BucketInfo = require('arsenal').models.BucketInfo;
const { isBucketAuthorized, isObjAuthorized } =
const { isBucketAuthorized, isObjAuthorized, validateQuotas } =
require('../api/apiUtils/authorization/permissionChecks');
const bucketShield = require('../api/apiUtils/bucket/bucketShield');
const { onlyOwnerAllowed } = require('../../constants');
const { actionNeedQuotaCheck, actionWithDataDeletion } = require('arsenal/build/lib/policyEvaluator/RequestContext');
const { processBytesToWrite, validateQuotas } = require('../api/apiUtils/quotas/quotaUtils');
/** getNullVersionFromMaster - retrieves the null version
* metadata via retrieving the master key
@ -184,7 +182,7 @@ function validateBucket(bucket, params, log, actionImplicitDenies = {}) {
* @return {undefined} - and call callback with params err, bucket md
*/
function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log, callback) {
const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request, withVersionId } = params;
const { authInfo, bucketName, objectKey, versionId, getDeleteMarker, request } = params;
let requestType = params.requestType;
if (!Array.isArray(requestType)) {
requestType = [requestType];
@ -229,6 +227,22 @@ function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log,
}
return next(null, bucket, objMD);
},
(bucket, objMD, next) => {
let contentLength = request?.parsedContentLength || 0;
if (!contentLength && objMD?.['content-length']) {
// object is being deleted
contentLength = -Number.parseInt(objMD['content-length'], 10);
} else if (request.apiMethod === 'objectRestore') {
// object is being restored
contentLength = Number.parseInt(objMD['content-length'], 10);
} else if (contentLength && objMD?.['content-length']) {
// object is being replaced: store the size difference (existing minus incoming)
contentLength = Number.parseInt(objMD['content-length'], 10) - contentLength;
}
// Otherwise, this is a plain object write, or the action will be filtered out during quota evaluation
return validateQuotas(bucket, request.accountQuotas, requestType, request.apiMethod,
contentLength, log, err => next(err, bucket, objMD));
},
(bucket, objMD, next) => {
const canonicalID = authInfo.getCanonicalID();
if (!isObjAuthorized(bucket, objMD, requestType, canonicalID, authInfo, log, request,
@ -238,21 +252,6 @@ function standardMetadataValidateBucketAndObj(params, actionImplicitDenies, log,
}
return next(null, bucket, objMD);
},
(bucket, objMD, next) => {
const needQuotaCheck = requestType => requestType.some(type => actionNeedQuotaCheck[type] ||
actionWithDataDeletion[type]);
const checkQuota = params.checkQuota === undefined ? needQuotaCheck(requestType) : params.checkQuota;
// withVersionId cover cases when an object is being restored with a specific version ID.
// In this case, the storage space was already accounted for when the RestoreObject API call
// was made, so we don't need to add any inflight, but quota must be evaluated.
if (!checkQuota) {
return next(null, bucket, objMD);
}
const contentLength = processBytesToWrite(request.apiMethod, bucket, versionId,
request?.parsedContentLength || 0, objMD, params.destObjMD);
return validateQuotas(request, bucket, request.accountQuotas, requestType, request.apiMethod,
contentLength, withVersionId, log, err => next(err, bucket, objMD));
},
], (err, bucket, objMD) => {
if (err) {
// still return bucket for cors headers
@ -294,7 +293,6 @@ module.exports = {
validateBucket,
metadataGetObject,
metadataGetObjects,
processBytesToWrite,
standardMetadataValidateBucketAndObj,
standardMetadataValidateBucket,
};
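The contentLength passed to validateQuotas above encodes three cases; a standalone sketch of the same arithmetic (helper name and parameters hypothetical):

// Hypothetical helper mirroring the inflight-bytes computation above.
function quotaDelta(apiMethod, incomingLength, existingSize) {
    if (!incomingLength && existingSize) {
        return -existingSize;                 // delete: frees the stored bytes
    }
    if (apiMethod === 'objectRestore') {
        return existingSize;                  // restore: re-counts the stored bytes
    }
    if (incomingLength && existingSize) {
        return existingSize - incomingLength; // overwrite: store the diff
    }
    return incomingLength;                    // plain write
}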

View File

@ -2,9 +2,9 @@ const MetadataWrapper = require('arsenal').storage.metadata.MetadataWrapper;
const { config } = require('../Config');
const logger = require('../utilities/logger');
const constants = require('../../constants');
const bucketclient = require('bucketclient');
const clientName = config.backends.metadata;
let bucketclient;
let params;
if (clientName === 'mem') {
params = {};
@ -21,7 +21,6 @@ if (clientName === 'mem') {
noDbOpen: null,
};
} else if (clientName === 'scality') {
bucketclient = require('bucketclient');
params = {
bucketdBootstrap: config.bucketd.bootstrap,
bucketdLog: config.bucketd.log,

View File

@ -1,17 +0,0 @@
const { config } = require('../Config');
const { ScubaClientImpl } = require('./scuba/wrapper');
let instance = null;
switch (config.backends.quota) {
case 'scuba':
instance = new ScubaClientImpl(config);
break;
default:
instance = {
enabled: false,
};
break;
}
module.exports = instance;

View File

@ -1,16 +1,13 @@
const util = require('util');
const { default: ScubaClient } = require('scubaclient');
const { externalBackendHealthCheckInterval } = require('../../../constants');
const monitoring = require('../../utilities/monitoringHandler');
const { config } = require('../Config');
const { externalBackendHealthCheckInterval } = require('../../constants');
class ScubaClientImpl extends ScubaClient {
constructor(config) {
super(config.scuba);
this.enabled = false;
this.maxStaleness = config.quota.maxStaleness;
this._healthCheckTimer = null;
this._log = null;
this._getLatestMetricsCallback = util.callbackify(this.getLatestMetrics);
if (config.scuba) {
this.enabled = true;
@ -27,17 +24,10 @@ class ScubaClientImpl extends ScubaClient {
}
_healthCheck() {
return this.healthCheck().then(data => {
if (data?.date) {
const date = new Date(data.date);
if (Date.now() - date.getTime() > this.maxStaleness) {
throw new Error('Data is stale, disabling quotas');
}
}
return this.healthCheck().then(() => {
if (!this.enabled) {
this._log.info('Scuba health check passed, enabling quotas');
}
monitoring.utilizationServiceAvailable.set(1);
this.enabled = true;
}).catch(err => {
if (this.enabled) {
@ -46,7 +36,6 @@ class ScubaClientImpl extends ScubaClient {
description: err.message,
});
}
monitoring.utilizationServiceAvailable.set(0);
this.enabled = false;
});
}
@ -61,20 +50,11 @@ class ScubaClientImpl extends ScubaClient {
}, Number(process.env.SCUBA_HEALTHCHECK_FREQUENCY)
|| externalBackendHealthCheckInterval);
}
getUtilizationMetrics(metricsClass, resourceName, options, body, callback) {
const requestStartTime = process.hrtime.bigint();
return this._getLatestMetricsCallback(metricsClass, resourceName, options, body, (err, data) => {
const responseTimeInNs = Number(process.hrtime.bigint() - requestStartTime);
monitoring.utilizationMetricsRetrievalDuration.labels({
code: err ? (err.statusCode || 500) : 200,
class: metricsClass,
}).observe(responseTimeInNs / 1e9);
return callback(err, data);
});
}
}
const ScubaClientInstance = new ScubaClientImpl(config);
module.exports = {
ScubaClientInstance,
ScubaClientImpl,
};
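A consumption sketch for the wrapper above: callers gate quota evaluation on the enabled flag that the periodic health check maintains. The 'bucket' metrics class is an assumption for illustration; getLatestMetrics is inherited from scubaclient and returns a promise, as the removed callbackify wrapper implies:

const { ScubaClientInstance } = require('./lib/scuba/wrapper');
const logger = require('./lib/utilities/logger');

ScubaClientInstance.setup(logger.newRequestLogger());

async function bucketMetrics(bucketName) {
    if (!ScubaClientInstance.enabled) {
        return null; // scuba unconfigured or unhealthy: skip quota checks
    }
    return ScubaClientInstance.getLatestMetrics('bucket', bucketName, {}, null);
}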

View File

@ -18,9 +18,14 @@ const locationStorageCheck =
require('./api/apiUtils/object/locationStorageCheck');
const vault = require('./auth/vault');
const metadata = require('./metadata/wrapper');
const { initManagement } = require('./management');
const {
initManagementClient,
isManagementAgentUsed,
} = require('./management/agentClient');
const HttpAgent = require('agentkeepalive');
const QuotaService = require('./quotas/quotas');
const { ScubaClientInstance } = require('./scuba/wrapper');
const routes = arsenal.s3routes.routes;
const { parseLC, MultipleBackendGateway } = arsenal.storage.data;
const websiteEndpoints = _config.websiteEndpoints;
@ -51,6 +56,7 @@ const STATS_INTERVAL = 5; // 5 seconds
const STATS_EXPIRY = 30; // 30 seconds
const statsClient = new StatsClient(localCacheClient, STATS_INTERVAL,
STATS_EXPIRY);
const enableRemoteManagement = true;
class S3Server {
/**
@ -316,9 +322,19 @@ class S3Server {
this._startServer(this.routeAdminRequest, _config.metricsPort);
}
// Start quota service health checks
if (QuotaService.enabled) {
QuotaService?.setup(log);
// Start ScubaClient health checks
ScubaClientInstance.setup(log);
// TODO this should wait for metadata healthcheck to be ok
// TODO only do this in cluster master
if (enableRemoteManagement) {
if (!isManagementAgentUsed()) {
setTimeout(() => {
initManagement(logger.newRequestLogger());
}, 5000);
} else {
initManagementClient();
}
}
this.started = true;
@ -327,7 +343,8 @@ class S3Server {
}
function main() {
let workers = _config.workers || 1;
// TODO: change config to use workers prop. name for clarity
let workers = _config.clusters || 1;
if (process.env.S3BACKEND === 'mem') {
workers = 1;
}

View File

@ -1,12 +1,16 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const _config = require('../Config').config;
const { utapiVersion, UtapiServer: utapiServer } = require('utapi');
const vault = require('../auth/vault');
// start utapi server
if (utapiVersion === 1 && _config.utapi) {
const fullConfig = Object.assign({}, _config.utapi,
{ redis: _config.redis, vaultclient: vault });
{ redis: _config.redis });
if (_config.vaultd) {
Object.assign(fullConfig, { vaultd: _config.vaultd });
}
if (_config.https) {
Object.assign(fullConfig, { https: _config.https });
}
// copy healthcheck IPs
if (_config.healthChecks) {
Object.assign(fullConfig, { healthChecks: _config.healthChecks });

View File

@ -1,4 +1,3 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const UtapiReindex = require('utapi').UtapiReindex;
const { config } = require('../Config');

View File

@ -1,4 +1,3 @@
require('werelogs').stderrUtils.catchAndTimestampStderr();
const UtapiReplay = require('utapi').UtapiReplay;
const _config = require('../Config').config;

View File

@ -1,11 +1,7 @@
const { configure, Werelogs } = require('werelogs');
const { Werelogs } = require('werelogs');
const _config = require('../Config.js').config;
configure({
level: _config.log.logLevel,
dump: _config.log.dumpLevel,
});
const werelogs = new Werelogs({
level: _config.log.logLevel,
dump: _config.log.dumpLevel,

View File

@ -1,6 +1,5 @@
const { errors } = require('arsenal');
const client = require('prom-client');
const { config } = require('../Config');
const collectDefaultMetrics = client.collectDefaultMetrics;
const numberOfBuckets = new client.Gauge({
@ -65,49 +64,6 @@ const httpResponseSizeBytes = new client.Summary({
help: 'Cloudserver HTTP response sizes in bytes',
});
let quotaEvaluationDuration;
let utilizationMetricsRetrievalDuration;
let utilizationServiceAvailable;
let bucketsWithQuota;
let accountsWithQuota;
let requestWithQuotaMetricsUnavailable;
if (config.isQuotaEnabled) {
quotaEvaluationDuration = new client.Histogram({
name: 's3_cloudserver_quota_evaluation_duration_seconds',
help: 'Duration of the quota evaluation operation',
labelNames: ['action', 'code', 'type'],
buckets: [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5, 1],
});
utilizationMetricsRetrievalDuration = new client.Histogram({
name: 's3_cloudserver_quota_metrics_retrieval_duration_seconds',
help: 'Duration of the utilization metrics retrieval operation',
labelNames: ['code', 'class'],
buckets: [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.2, 0.5],
});
utilizationServiceAvailable = new client.Gauge({
name: 's3_cloudserver_quota_utilization_service_available',
help: 'Availability of the utilization service',
});
bucketsWithQuota = new client.Gauge({
name: 's3_cloudserver_quota_buckets_count',
help: 'Total number of buckets quota',
});
accountsWithQuota = new client.Gauge({
name: 's3_cloudserver_quota_accounts_count',
help: 'Total number of account quota',
});
requestWithQuotaMetricsUnavailable = new client.Counter({
name: 's3_cloudserver_quota_unavailable_count',
help: 'Total number of requests with quota metrics unavailable',
});
}
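For context, a sketch of how the quota evaluation histogram above is observed; the label values are examples, and the object form of labels() matches how the retrieval-duration histogram is consumed in the scuba wrapper:

// Illustrative only: time a quota evaluation and record it.
const start = process.hrtime.bigint();
// ... evaluate the quota ...
const elapsedNs = Number(process.hrtime.bigint() - start);
quotaEvaluationDuration.labels({ action: 'objectPut', code: 200, type: 'bucket' })
    .observe(elapsedNs / 1e9);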
// Lifecycle duration metric, to track the completion of restore.
// This metric is used to track the time it takes to complete the lifecycle operation (restore).
// NOTE : this metric is the same as the one defined in Backbeat, and must keep the same name,
@ -187,10 +143,6 @@ function crrCacheToProm(crrResults) {
numberOfBuckets.set(crrResults.getObjectCount.buckets || 0);
numberOfObjects.set(crrResults.getObjectCount.objects || 0);
}
if (config.isQuotaEnabled) {
bucketsWithQuota.set(crrResults?.getObjectCount?.bucketWithQuotaCount || 0);
accountsWithQuota.set(crrResults?.getVaultReport?.accountWithQuotaCount || 0);
}
if (crrResults.getDataDiskUsage) {
dataDiskAvailable.set(crrResults.getDataDiskUsage.available || 0);
dataDiskFree.set(crrResults.getDataDiskUsage.free || 0);
@ -268,9 +220,4 @@ module.exports = {
httpRequestsTotal,
httpActiveRequests,
lifecycleDuration,
quotaEvaluationDuration,
utilizationMetricsRetrievalDuration,
utilizationServiceAvailable,
bucketsWithQuota,
requestWithQuotaMetricsUnavailable,
};

View File

@ -10,7 +10,6 @@ const config = require('../Config').config;
const { data } = require('../data/wrapper');
const metadata = require('../metadata/wrapper');
const monitoring = require('../utilities/monitoringHandler');
const vault = require('../auth/vault');
const REPORT_MODEL_VERSION = 1;
const ASYNCLIMIT = 5;
@ -462,7 +461,6 @@ function reportHandler(clientIP, req, res, log) {
getCRRMetrics: cb => getCRRMetrics(log, cb),
getReplicationStates: cb => getReplicationStates(log, cb),
getIngestionInfo: cb => getIngestionInfo(log, cb),
getVaultReport: cb => vault.report(log, cb),
},
(err, results) => {
if (err) {
@ -490,7 +488,6 @@ function reportHandler(clientIP, req, res, log) {
capabilities: getCapabilities(),
ingestStats: results.getIngestionInfo.metrics,
ingestStatus: results.getIngestionInfo.status,
vaultReport: results.getVaultReport,
};
monitoring.crrCacheToProm(results);
res.writeHead(200, { 'Content-Type': 'application/json' });

View File

@ -1,12 +0,0 @@
{
"STANDARD": {
"type": "vitastor",
"objectId": "std",
"legacyAwsBehavior": true,
"details": {
"config_path": "/etc/vitastor/vitastor.conf",
"pool_id": 3,
"metadata_image": "s3-volume-meta"
}
}
}

179
managementAgent.js Normal file
View File

@ -0,0 +1,179 @@
const Uuid = require('uuid');
const WebSocket = require('ws');
const logger = require('./lib/utilities/logger');
const { initManagement } = require('./lib/management');
const _config = require('./lib/Config').config;
const { managementAgentMessageType } = require('./lib/management/agentClient');
const { addOverlayMessageListener } = require('./lib/management/push');
const { saveConfigurationVersion } = require('./lib/management/configuration');
// TODO: auth?
// TODO: werelogs with a specific name.
const CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS = 15000;
class ManagementAgentServer {
constructor() {
this.port = _config.managementAgent.port || 8010;
this.wss = null;
this.loadedOverlay = null;
this.stop = this.stop.bind(this);
process.on('SIGINT', this.stop);
process.on('SIGHUP', this.stop);
process.on('SIGQUIT', this.stop);
process.on('SIGTERM', this.stop);
process.on('SIGPIPE', () => {});
}
start(_cb) {
const cb = _cb || function noop() {};
/* Define REPORT_TOKEN env variable needed by the management
* module. */
process.env.REPORT_TOKEN = process.env.REPORT_TOKEN
|| _config.reportToken
|| Uuid.v4();
initManagement(logger.newRequestLogger(), overlay => {
let error = null;
if (overlay) {
this.loadedOverlay = overlay;
this.startServer();
} else {
error = new Error('failed to init management');
}
return cb(error);
});
}
stop() {
if (!this.wss) {
process.exit(0);
return;
}
this.wss.close(() => {
logger.info('server shutdown');
process.exit(0);
});
}
startServer() {
this.wss = new WebSocket.Server({
port: this.port,
clientTracking: true,
path: '/watch',
});
this.wss.on('connection', this.onConnection.bind(this));
this.wss.on('listening', this.onListening.bind(this));
this.wss.on('error', this.onError.bind(this));
setInterval(this.checkBrokenConnections.bind(this),
CHECK_BROKEN_CONNECTIONS_FREQUENCY_MS);
addOverlayMessageListener(this.onNewOverlay.bind(this));
}
onConnection(socket, request) {
function heartbeat() {
this.isAlive = true;
}
logger.info('client connected to watch route', {
ip: request.connection.remoteAddress,
});
/* eslint-disable no-param-reassign */
socket.isAlive = true;
socket.on('pong', heartbeat.bind(socket));
if (socket.readyState !== socket.OPEN) {
logger.error('client socket not in ready state', {
state: socket.readyState,
client: socket._socket._peername,
});
return;
}
const msg = {
messageType: managementAgentMessageType.NEW_OVERLAY,
payload: this.loadedOverlay,
};
socket.send(JSON.stringify(msg), error => {
if (error) {
logger.error('failed to send remoteOverlay to client', {
error,
client: socket._socket._peername,
});
}
});
}
onListening() {
logger.info('websocket server listening',
{ port: this.port });
}
onError(error) {
logger.error('websocket server error', { error });
}
_sendNewOverlayToClient(client) {
if (client.readyState !== client.OPEN) {
logger.error('client socket not in ready state', {
state: client.readyState,
client: client._socket._peername,
});
return;
}
const msg = {
messageType: managementAgentMessageType.NEW_OVERLAY,
payload: this.loadedOverlay,
};
client.send(JSON.stringify(msg), error => {
if (error) {
logger.error(
'failed to send remoteOverlay to management agent client', {
error, client: client._socket._peername,
});
}
});
}
onNewOverlay(remoteOverlay) {
const remoteOverlayObj = JSON.parse(remoteOverlay);
saveConfigurationVersion(
this.loadedOverlay, remoteOverlayObj, logger, err => {
if (err) {
logger.error('failed to save remote overlay', { err });
return;
}
this.loadedOverlay = remoteOverlayObj;
this.wss.clients.forEach(
this._sendNewOverlayToClient.bind(this)
);
});
}
checkBrokenConnections() {
this.wss.clients.forEach(client => {
if (!client.isAlive) {
logger.info('close broken connection', {
client: client._socket._peername,
});
client.terminate();
return;
}
client.isAlive = false;
client.ping();
});
}
}
const server = new ManagementAgentServer();
server.start();
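A minimal client sketch for the /watch route above, using the same ws dependency (host and port assume the 8010 default); the ws library answers pings automatically, so the broken-connection sweep keeps the client marked alive:

const WebSocket = require('ws');

const client = new WebSocket('ws://localhost:8010/watch');
client.on('message', data => {
    // The server only sends NEW_OVERLAY messages, as JSON.
    const { messageType, payload } = JSON.parse(data.toString());
    console.log('received overlay', messageType, payload);
});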

View File

@ -192,163 +192,3 @@ tests:
summary: Very high delete latency
exp_labels:
severity: critical
# QuotaMetricsNotAvailable (case with bucket quota)
##################################################################################################
- name: Quota metrics not available (bucket quota)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
- series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case with account quota)
##################################################################################################
- name: Quota metrics not available (account quota)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
- series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case with both bucket and account quotas)
##################################################################################################
- name: Quota metrics not available (bucket and account quotas)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
- series: s3_cloudserver_quota_accounts_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
- series: s3_cloudserver_quota_buckets_count{namespace="zenko",job="artesca-data-ops-report-handler"}
values: 1+1x32
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: warning
- exp_annotations:
description: The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled.
summary: Utilization metrics service not available
exp_labels:
severity: critical
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaMetricsNotAvailable (case without quota)
##################################################################################################
- name: Quota metrics not available (no quota configured)
interval: 1m
input_series:
- series: s3_cloudserver_quota_utilization_service_available{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 1+1x6 0+0x20 1+1x6
alert_rule_test:
- alertname: QuotaMetricsNotAvailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 15m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 20m
exp_alerts: []
- alertname: QuotaMetricsNotAvailable
eval_time: 28m
exp_alerts: []
# QuotaUnavailable
##################################################################################################
- name: Quota evaluation disabled
interval: 1m
input_series:
- series: s3_cloudserver_quota_unavailable_count{namespace="zenko",service="artesca-data-connector-s3api-metrics"}
values: 0+0x6 1+1x20 0+0x6
alert_rule_test:
- alertname: QuotaUnavailable
eval_time: 6m
exp_alerts: []
- alertname: QuotaUnavailable
eval_time: 20m
exp_alerts:
- exp_annotations:
description: Quotas were not honored due to metrics being unavailable. If the S3 Bucket or Account was created recently, the metrics may not be available yet.
summary: High number of quota requests with metrics unavailable
exp_labels:
severity: critical
- alertname: QuotaUnavailable
eval_time: 30m
exp_alerts: []

View File

@ -6,9 +6,6 @@ x-inputs:
- name: service
type: constant
value: artesca-data-connector-s3api-metrics
- name: reportJob
type: constant
value: artesca-data-ops-report-handler
- name: replicas
type: constant
- name: systemErrorsWarningThreshold
@ -29,9 +26,6 @@ x-inputs:
- name: deleteLatencyCriticalThreshold
type: config
value: 1.000
- name: quotaUnavailabilityThreshold
type: config
value: 0.500
groups:
- name: CloudServer
@ -138,45 +132,3 @@ groups:
annotations:
description: "Latency of delete object operations is more than 1s"
summary: "Very high delete latency"
# As a platform admin I want to be alerted (warning) when the utilization metrics service is enabled
# but not available for at least half of the S3 services during the last minute
- alert: QuotaMetricsNotAvailable
expr: |
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
< ${quotaUnavailabilityThreshold} and
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
labels:
severity: warning
annotations:
description: "The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled."
summary: "Utilization metrics service not available"
# As a platform admin I want to be alerted (critical) when the utilization metrics service is enabled
# but not available during the last 10 minutes
- alert: QuotaMetricsNotAvailable
expr: |
avg(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",service="${service}"})
< ${quotaUnavailabilityThreshold} and
(max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job="${reportJob}"}) > 0 or
max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job="${reportJob}"}) > 0)
for: 10m
labels:
severity: critical
annotations:
description: "The storage metrics required for Account or S3 Bucket Quota checks are not available, the quotas are disabled."
summary: "Utilization metrics service not available"
# As a platform admin I want to be alerted (critical) when quotas were not honored due to metrics
# being unavailable
- alert: QuotaUnavailable
expr: |
sum(increase(s3_cloudserver_quota_unavailable_count{namespace="${namespace}",service="${service}"}[2m]))
> 0
for: 5m
labels:
severity: critical
annotations:
description: "Quotas were not honored due to metrics being unavailable. If the S3 Bucket or Account was created recently, the metrics may not be available yet."
summary: "High number of quota requests with metrics unavailable"

View File

@ -1931,7 +1931,7 @@
"targets": [
{
"datasource": null,
"expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"expr": "sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
"format": "heatmap",
"hide": false,
"instant": false,
@ -1960,7 +1960,7 @@
},
"yAxis": {
"decimals": null,
"format": "s",
"format": "dtdurations",
"label": null,
"logBase": 1,
"max": null,
@ -2182,7 +2182,7 @@
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"expr": "sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
@ -2196,7 +2196,7 @@
},
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"expr": "sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace=\"${namespace}\", job=~\"$job\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
@ -2665,865 +2665,6 @@
"transformations": [],
"transparent": false,
"type": "piechart"
},
{
"collapsed": false,
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": []
}
}
},
"gridPos": {
"h": 1,
"w": 24,
"x": 0,
"y": 65
},
"hideTimeOverride": false,
"id": 34,
"links": [],
"maxDataPoints": 100,
"panels": [],
"targets": [],
"title": "Quotas",
"transformations": [],
"transparent": false,
"type": "row"
},
{
"datasource": "${DS_PROMETHEUS}",
"description": "Number of S3 buckets with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"custom": {},
"decimals": null,
"mappings": [],
"noValue": "-",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#808080",
"index": 0,
"line": true,
"op": "gt",
"value": "null",
"yaxis": "left"
},
{
"color": "blue",
"index": 1,
"line": true,
"op": "gt",
"value": 0.0,
"yaxis": "left"
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 0,
"y": 66
},
"hideTimeOverride": false,
"id": 35,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"targets": [
{
"datasource": null,
"expr": "max(s3_cloudserver_quota_buckets_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Buckets with quota",
"transformations": [],
"transparent": false,
"type": "stat"
},
{
"datasource": "${DS_PROMETHEUS}",
"description": "Number of accounts with quota enabled in the cluster.\nThis value is computed asynchronously, and update may be delayed up to 1h.",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"custom": {},
"decimals": null,
"mappings": [],
"noValue": "-",
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "#808080",
"index": 0,
"line": true,
"op": "gt",
"value": "null",
"yaxis": "left"
},
{
"color": "blue",
"index": 1,
"line": true,
"op": "gt",
"value": 0.0,
"yaxis": "left"
}
]
},
"unit": "short"
},
"overrides": []
},
"gridPos": {
"h": 4,
"w": 6,
"x": 0,
"y": 70
},
"hideTimeOverride": false,
"id": 36,
"links": [],
"maxDataPoints": 100,
"options": {
"colorMode": "value",
"graphMode": "area",
"justifyMode": "auto",
"orientation": "auto",
"reduceOptions": {
"calcs": [
"lastNotNull"
],
"fields": "",
"values": false
},
"textMode": "auto"
},
"targets": [
{
"datasource": null,
"expr": "max(s3_cloudserver_quota_accounts_count{namespace=\"${namespace}\", job=~\"${reportJob}\"})",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Accounts with quota",
"transformations": [],
"transparent": false,
"type": "stat"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "ops"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 6,
"y": 66
},
"hideTimeOverride": false,
"id": 37,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "hidden",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_unavailable_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Operations with unavailable metrics",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "ops"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 66
},
"hideTimeOverride": false,
"id": 38,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [
"min",
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval])) by(action)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{action}}",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota evaluaton rate per S3 action",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "stepAfter",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"index": 0,
"line": true,
"op": "gt",
"value": "null",
"yaxis": "left"
},
{
"color": "orange",
"index": 1,
"line": true,
"op": "gt",
"value": 90.0,
"yaxis": "left"
},
{
"color": "red",
"index": 2,
"line": true,
"op": "gt",
"value": 0.0,
"yaxis": "left"
}
]
},
"unit": "percent"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 74
},
"hideTimeOverride": false,
"id": 39,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "hidden",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace=\"${namespace}\",job=\"${job}\"}[$__rate_interval])) * 100",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota service uptime",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 30,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "ops"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 6,
"x": 6,
"y": 74
},
"hideTimeOverride": false,
"id": 40,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=~\"2..\", job=\"${job}\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "Success",
"metric": "",
"refId": "",
"step": 10,
"target": ""
},
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", code=\"429\", job=\"${job}\"}[$__rate_interval]))",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "Quota Exceeded",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota evaluation status code over time",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": 180000,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 74
},
"hideTimeOverride": false,
"id": 41,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [
"min",
"mean",
"max"
],
"displayMode": "table",
"placement": "right"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (type)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ type }} (success)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
},
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)\n /\nsum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=\"429\"}[$__rate_interval])) by (type)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ type }} (exceeded)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Average quota evaluation latencies",
"transformations": [],
"transparent": false,
"type": "timeseries"
},
{
"cards": {
"cardPadding": null,
"cardRound": null
},
"color": {
"cardColor": "#b4ff00",
"colorScale": "sqrt",
"colorScheme": "interpolateOranges",
"exponent": 0.5,
"max": null,
"min": null,
"mode": "opacity"
},
"dataFormat": "tsbuckets",
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"thresholds": {
"mode": "absolute",
"steps": []
}
}
},
"gridPos": {
"h": 8,
"w": 6,
"x": 0,
"y": 82
},
"heatmap": {},
"hideTimeOverride": false,
"hideZeroBuckets": false,
"highlightCards": true,
"id": 42,
"legend": {
"show": false
},
"links": [],
"maxDataPoints": 25,
"reverseYBuckets": false,
"targets": [
{
"datasource": null,
"expr": "sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace=\"${namespace}\", job=\"${job}\"}[$__rate_interval]))",
"format": "heatmap",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ le }}",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Quota evaluation duration",
"tooltip": {
"show": true,
"showHistogram": true
},
"transformations": [],
"transparent": false,
"type": "heatmap",
"xAxis": {
"mode": "time",
"name": null,
"show": true,
"values": []
},
"yAxis": {
"decimals": null,
"format": "s",
"label": null,
"logBase": 1,
"max": null,
"min": null,
"show": true
}
},
{
"datasource": "${DS_PROMETHEUS}",
"editable": true,
"error": false,
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"lineInterpolation": "smooth",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"log": 2,
"type": "linear"
},
"showPoints": "auto",
"spanNulls": 180000,
"stacking": {},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": []
},
"unit": "s"
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 18,
"x": 6,
"y": 82
},
"hideTimeOverride": false,
"id": 43,
"links": [],
"maxDataPoints": 100,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom"
},
"tooltip": {
"mode": "single"
}
},
"targets": [
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"2..\"}[$__rate_interval])) by (class)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ class }} (success)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
},
{
"datasource": null,
"expr": "sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)\n /\nsum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace=\"${namespace}\", job=\"${job}\", code=~\"4..|5..\"}[$__rate_interval])) by (class)",
"format": "time_series",
"hide": false,
"instant": false,
"interval": "",
"intervalFactor": 1,
"legendFormat": "{{ class }} (error)",
"metric": "",
"refId": "",
"step": 10,
"target": ""
}
],
"title": "Average utilization metrics retrieval latencies",
"transformations": [],
"transparent": false,
"type": "timeseries"
}
],
"refresh": "30s",
@ -3625,5 +2766,5 @@
"timezone": "",
"title": "S3 service",
"uid": null,
"version": 110
"version": 31
}

View File

@ -366,28 +366,6 @@ def average_latency_target(title, action="", by=""):
)
def average_quota_latency_target(code="", by=""):
# type: (str, str) -> str
extra = ', code=' + code if code else ""
by = " by (" + by + ")" if by else ""
return "\n".join([
'sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_sum{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501
" /",
'sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501,
])
def average_quota_retrieval_latency(code="", by=""):
# type: (str, str) -> str
extra = ', code=' + code if code else ""
by = " by (" + by + ")" if by else ""
return "\n".join([
'sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_sum{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501
" /",
'sum(rate(s3_cloudserver_quota_metrics_retrieval_duration_seconds_count{namespace="${namespace}", job="${job}"' + extra + "}[$__rate_interval]))" + by, # noqa: E501,
])
averageLatencies = TimeSeries(
title="Average latencies",
dataSource="${DS_PROMETHEUS}",
@ -428,10 +406,10 @@ requestTime = Heatmap(
dataFormat="tsbuckets",
maxDataPoints=25,
tooltip=Tooltip(show=True, showHistogram=True),
yAxis=YAxis(format=UNITS.SECONDS),
yAxis=YAxis(format=UNITS.DURATION_SECONDS),
color=HeatmapColor(mode="opacity"),
targets=[Target(
expr='sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
expr='sum by(le) (increase(s3_cloudserver_http_request_duration_seconds_bucket{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
format="heatmap",
legendFormat="{{ le }}",
)],
@ -455,11 +433,11 @@ bandWidth = TimeSeries(
unit="binBps",
targets=[
Target(
expr='sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
expr='sum(rate(s3_cloudserver_http_response_size_bytes_sum{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
legendFormat="Out"
),
Target(
expr='sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
expr='sum(rate(s3_cloudserver_http_request_size_bytes_sum{namespace="${namespace}", job=~"$job"}[$__rate_interval]))', # noqa: E501
legendFormat="In"
)
],
@ -547,174 +525,6 @@ top10Error5xxByBucket = top10_errors_by_bucket(
title="5xx : Top10 by Bucket", code='~"5.."'
)
quotaHealth = TimeSeries(
title="Quota service uptime",
legendDisplayMode="hidden",
dataSource="${DS_PROMETHEUS}",
lineInterpolation="stepAfter",
fillOpacity=30,
unit=UNITS.PERCENT_FORMAT,
targets=[Target(
expr='avg(avg_over_time(s3_cloudserver_quota_utilization_service_available{namespace="${namespace}",job="${job}"}[$__rate_interval])) * 100', # noqa: E501
)],
thresholds=[
Threshold("green", 0, 95.0),
Threshold("orange", 1, 90.0),
Threshold("red", 2, 0.0),
],
)
quotaStatusCode = TimeSeries(
title="Quota evaluation status code over time",
dataSource="${DS_PROMETHEUS}",
fillOpacity=30,
lineInterpolation="smooth",
unit=UNITS.OPS_PER_SEC,
targets=[Target(
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", code=~"2..", job="${job}"}[$__rate_interval]))', # noqa: E501
legendFormat="Success",
), Target(
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", code="429", job="${job}"}[$__rate_interval]))', # noqa: E501
legendFormat="Quota Exceeded",
)],
)
quotaByAction = TimeSeries(
title="Quota evaluaton rate per S3 action",
dataSource="${DS_PROMETHEUS}",
legendDisplayMode="table",
legendPlacement="right",
legendValues=["min", "mean", "max"],
lineInterpolation="smooth",
unit=UNITS.OPS_PER_SEC,
targets=[
Target(
expr='sum(rate(s3_cloudserver_quota_evaluation_duration_seconds_count{namespace="${namespace}", job="${job}"}[$__rate_interval])) by(action)', # noqa: E501
legendFormat="{{action}}",
)
]
)
averageQuotaDuration = Heatmap(
title="Quota evaluation duration",
dataSource="${DS_PROMETHEUS}",
dataFormat="tsbuckets",
maxDataPoints=25,
tooltip=Tooltip(show=True, showHistogram=True),
yAxis=YAxis(format=UNITS.SECONDS),
color=HeatmapColor(mode="opacity"),
targets=[Target(
expr='sum by(le) (increase(s3_cloudserver_quota_evaluation_duration_seconds_bucket{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
format="heatmap",
legendFormat="{{ le }}",
)],
)
operationsWithUnavailableMetrics = TimeSeries(
title="Operations with unavailable metrics",
dataSource="${DS_PROMETHEUS}",
fillOpacity=30,
lineInterpolation="smooth",
unit=UNITS.OPS_PER_SEC,
legendDisplayMode="hidden",
targets=[Target(
expr='sum(rate(s3_cloudserver_quota_unavailable_count{namespace="${namespace}", job="${job}"}[$__rate_interval]))', # noqa: E501
)],
)
averageQuotaLatencies = TimeSeries(
title="Average quota evaluation latencies",
dataSource="${DS_PROMETHEUS}",
lineInterpolation="smooth",
spanNulls=3*60*1000,
legendDisplayMode="table",
legendPlacement="right",
legendValues=["min", "mean", "max"],
unit=UNITS.SECONDS,
targets=[
Target(
expr=average_quota_latency_target(code='~"2.."', by='type'),
legendFormat='{{ type }} (success)',
),
Target(
expr=average_quota_latency_target(code='"429"', by='type'),
legendFormat='{{ type }} (exceeded)',
),
],
)
averageMetricsRetrievalLatencies = TimeSeries(
title="Average utilization metrics retrieval latencies",
dataSource="${DS_PROMETHEUS}",
lineInterpolation="smooth",
spanNulls=3*60*1000,
unit=UNITS.SECONDS,
targets=[
Target(
expr=average_quota_retrieval_latency(code='~"2.."', by='class'),
legendFormat='{{ class }} (success)',
),
Target(
expr=average_quota_retrieval_latency(
code='~"4..|5.."',
by='class'
),
legendFormat='{{ class }} (error)',
),
],
)
bucketQuotaCounter = Stat(
title="Buckets with quota",
description=(
"Number of S3 buckets with quota enabled in the cluster.\n"
"This value is computed asynchronously, and update "
"may be delayed up to 1h."
),
dataSource="${DS_PROMETHEUS}",
colorMode="value",
format=UNITS.SHORT,
noValue="-",
reduceCalc="lastNotNull",
targets=[Target(
expr='max(s3_cloudserver_quota_buckets_count{namespace="${namespace}", job=~"${reportJob}"})', # noqa: E501
)],
thresholds=[
Threshold("#808080", 0, 0.0),
Threshold("blue", 1, 0.0),
],
)
accountQuotaCounter = Stat(
title="Accounts with quota",
description=(
"Number of accounts with quota enabled in the cluster.\n"
"This value is computed asynchronously, and update "
"may be delayed up to 1h."
),
dataSource="${DS_PROMETHEUS}",
colorMode="value",
format=UNITS.SHORT,
noValue="-",
reduceCalc="lastNotNull",
targets=[Target(
expr='max(s3_cloudserver_quota_accounts_count{namespace="${namespace}", job=~"${reportJob}"})', # noqa: E501
)],
thresholds=[
Threshold("#808080", 0, 0.0),
Threshold("blue", 1, 0.0),
],
)
dashboard = (
Dashboard(
title="S3 service",
@ -820,24 +630,6 @@ dashboard = (
top10Error500ByBucket,
top10Error5xxByBucket
], height=8),
RowPanel(title="Quotas"),
layout.row([
layout.column([
layout.resize([bucketQuotaCounter], width=6, height=4),
layout.resize([accountQuotaCounter], width=6, height=4),
], height=8),
layout.resize([operationsWithUnavailableMetrics], width=6),
quotaByAction,
], height=8),
layout.row([
layout.resize([quotaHealth], width=6),
layout.resize([quotaStatusCode], width=6),
averageQuotaLatencies,
], height=8),
layout.row([
layout.resize([averageQuotaDuration], width=6),
averageMetricsRetrievalLatencies,
], height=8),
]),
)
.auto_panel_ids()

View File

@ -45,8 +45,8 @@ then
exit 1
fi
REGISTRY=${REGISTRY:-"ghcr.io/scality"}
PROJECT=${PROJECT:-"cloudserver"}
REGISTRY=${REGISTRY:-"registry.scality.com"}
PROJECT=${PROJECT:-"cloudserver-dev"}
set -x
${ORAS} push "${REGISTRY}/${PROJECT}/${NAME_TAG}" "${INPUT_FILE}:${MIME_TYPE}"

View File

@ -1,6 +1,6 @@
{
"name": "@zenko/cloudserver",
"version": "8.8.27",
"version": "8.8.20",
"description": "Zenko CloudServer, an open-source Node.js implementation of a server handling the Amazon S3 protocol",
"main": "index.js",
"engines": {
@ -21,13 +21,14 @@
"dependencies": {
"@azure/storage-blob": "^12.12.0",
"@hapi/joi": "^17.1.0",
"arsenal": "git+https://git.yourcmc.ru/vitalif/zenko-arsenal.git#development/8.1",
"async": "^2.5.0",
"aws-sdk": "^2.905.0",
"arsenal": "git+https://github.com/scality/arsenal#77e9b92f3e775e39f5f903a00b702a86b2aa75a1",
"async": "~2.5.0",
"aws-sdk": "2.905.0",
"bucketclient": "scality/bucketclient#8.1.9",
"bufferutil": "^4.0.6",
"commander": "^2.9.0",
"cron-parser": "^2.11.0",
"diskusage": "^1.1.3",
"diskusage": "1.1.3",
"google-auto-auth": "^0.9.1",
"http-proxy": "^1.17.0",
"http-proxy-agent": "^4.0.1",
@ -37,45 +38,38 @@
"mongodb": "^5.2.0",
"node-fetch": "^2.6.0",
"node-forge": "^0.7.1",
"npm-run-all": "^4.1.5",
"npm-run-all": "~4.1.5",
"prom-client": "14.2.0",
"request": "^2.81.0",
"scubaclient": "git+https://git.yourcmc.ru/vitalif/zenko-scubaclient.git",
"sql-where-parser": "^2.2.1",
"utapi": "git+https://git.yourcmc.ru/vitalif/zenko-utapi.git",
"scubaclient": "git+https://github.com/scality/scubaclient.git",
"sql-where-parser": "~2.2.1",
"utapi": "github:scality/utapi#8.1.13",
"utf-8-validate": "^5.0.8",
"utf8": "^2.1.1",
"utf8": "~2.1.1",
"uuid": "^8.3.2",
"werelogs": "git+https://git.yourcmc.ru/vitalif/zenko-werelogs.git#development/8.1",
"vaultclient": "scality/vaultclient#8.3.11",
"werelogs": "scality/werelogs#8.1.4",
"ws": "^5.1.0",
"xml2js": "^0.4.16"
},
"overrides": {
"ltgt": "^2.2.0"
"xml2js": "~0.4.16"
},
"devDependencies": {
"@babel/core": "^7.25.2",
"@babel/preset-env": "^7.25.3",
"babel-loader": "^9.1.3",
"bluebird": "^3.3.1",
"eslint": "^8.14.0",
"eslint-config-airbnb-base": "^15.0.0",
"eslint-config-scality": "git+https://git.yourcmc.ru/vitalif/zenko-eslint-config-scality.git",
"eslint-config-airbnb-base": "^13.1.0",
"eslint-config-scality": "scality/Guidelines#8.2.0",
"eslint-plugin-import": "^2.14.0",
"eslint-plugin-mocha": "^10.1.0",
"express": "^4.17.1",
"ioredis": "^4.9.5",
"istanbul": "^1.0.0-alpha.2",
"istanbul-api": "^1.0.0-alpha.13",
"ioredis": "4.9.5",
"istanbul": "1.0.0-alpha.2",
"istanbul-api": "1.0.0-alpha.13",
"lolex": "^1.4.0",
"mocha": ">=3.1.2",
"mocha": "^2.3.4",
"mocha-junit-reporter": "^1.23.1",
"mocha-multi-reporters": "^1.1.7",
"node-mocks-http": "^1.5.2",
"node-mocks-http": "1.5.2",
"sinon": "^13.0.1",
"tv4": "^1.2.7",
"webpack": "^5.93.0",
"webpack-cli": "^5.1.4"
"tv4": "^1.2.7"
},
"scripts": {
"cloudserver": "S3METADATA=mongodb npm-run-all --parallel start_dataserver start_s3server",
@ -116,11 +110,11 @@
"utapi_replay": "node lib/utapi/utapiReplay.js",
"utapi_reindex": "node lib/utapi/utapiReindex.js",
"management_agent": "node managementAgent.js",
"test": "CI=true S3BACKEND=mem S3QUOTA=scuba mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"test": "CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"test_versionid_base62": "S3_VERSION_ID_ENCODING_TYPE=base62 CI=true S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit/api",
"test_legacy_location": "CI=true S3QUOTA=scuba S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"test_legacy_location": "CI=true S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"test_utapi_v2": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/utapi",
"test_quota": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/quota",
"test_scuba": "mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/scuba",
"multiple_backend_test": "CI=true S3BACKEND=mem S3DATA=multiple mocha --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json -t 20000 --recursive tests/multipleBackend",
"unit_coverage": "CI=true mkdir -p coverage/unit/ && S3BACKEND=mem istanbul cover --dir coverage/unit _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --recursive tests/unit",
"unit_coverage_legacy_location": "CI=true mkdir -p coverage/unitlegacylocation/ && S3_LOCATION_FILE=tests/locationConfig/locationConfigLegacy.json S3BACKEND=mem istanbul cover --dir coverage/unitlegacylocation _mocha -- --reporter mocha-multi-reporters --reporter-options configFile=$INIT_CWD/tests/reporter-config.json --reporter mocha-junit-reporter --recursive tests/unit"

View File

@ -10,7 +10,6 @@ const nonExistantBucket = 'updatequotatestnonexistantbucket';
const quota = { quota: 2000 };
const negativeQuota = { quota: -1000 };
const wrongquotaFromat = '1000';
const largeQuota = { quota: 1000000000000 };
describe('Test update bucket quota', () => {
let s3;
@ -59,12 +58,4 @@ describe('Test update bucket quota', () => {
assert.strictEqual(err.Error.Message[0], 'Request body must be a JSON object');
}
});
it('should handle large quota values', async () => {
try {
await sendRequest('PUT', '127.0.0.1:8000', `/${bucket}/?quota=true`, JSON.stringify(largeQuota));
} catch (err) {
assert.fail(`Expected no error, but got ${err}`);
}
});
});

View File

@ -1,785 +0,0 @@
const async = require('async');
const assert = require('assert');
const { S3 } = require('aws-sdk');
const getConfig = require('../functional/aws-node-sdk/test/support/config');
const { Scuba: MockScuba, inflightFlushFrequencyMS } = require('../utilities/mock/Scuba');
const sendRequest = require('../functional/aws-node-sdk/test/quota/tooling').sendRequest;
const memCredentials = require('../functional/aws-node-sdk/lib/json/mem_credentials.json');
const metadata = require('../../lib/metadata/wrapper');
const { fakeMetadataArchive } = require('../functional/aws-node-sdk/test/utils/init');
const { config: s3Config } = require('../../lib/Config');
let mockScuba = null;
let s3Client = null;
const quota = { quota: 1000 };
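// When inflight accounting is enabled, CloudServer flushes quota deltas to
// the (mock) Scuba backend asynchronously, so tests must pause between
// steps; with inflights disabled, the mock counters are updated
// synchronously by the helpers below and no wait is needed.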
function wait(timeoutMs, cb) {
if (s3Config.isQuotaInflightEnabled()) {
return setTimeout(cb, timeoutMs);
}
return cb();
}
function createBucket(bucket, locked, cb) {
const config = {
Bucket: bucket,
};
if (locked) {
config.ObjectLockEnabledForBucket = true;
}
return s3Client.createBucket(config, (err, data) => {
assert.ifError(err);
return cb(err, data);
});
}
function configureBucketVersioning(bucket, cb) {
return s3Client.putBucketVersioning({
Bucket: bucket,
VersioningConfiguration: {
Status: 'Enabled',
},
}, (err, data) => {
assert.ifError(err);
return cb(err, data);
});
}
function putObjectLockConfiguration(bucket, cb) {
return s3Client.putObjectLockConfiguration({
Bucket: bucket,
ObjectLockConfiguration: {
ObjectLockEnabled: 'Enabled',
Rule: {
DefaultRetention: {
Mode: 'GOVERNANCE',
Days: 1,
},
},
},
}, (err, data) => {
assert.ifError(err);
return cb(err, data);
});
}
function deleteBucket(bucket, cb) {
return s3Client.deleteBucket({
Bucket: bucket,
}, err => {
assert.ifError(err);
return cb(err);
});
}
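// Each helper below mirrors the storage effect of the API call it wraps:
// when inflight tracking is disabled, CloudServer does not report byte
// deltas itself, so the mock Scuba counters are adjusted manually to keep
// the quota evaluation realistic.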
function putObject(bucket, key, size, cb) {
return s3Client.putObject({
Bucket: bucket,
Key: key,
Body: Buffer.alloc(size),
}, (err, data) => {
if (!err && !s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, size);
}
return cb(err, data);
});
}
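// Writes carrying x-scal-s3-version-id target storage that was already
// reserved when the restore was requested, hence the zero-byte adjustment.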
function putObjectWithCustomHeader(bucket, key, size, vID, cb) {
const request = s3Client.putObject({
Bucket: bucket,
Key: key,
Body: Buffer.alloc(size),
});
request.on('build', () => {
request.httpRequest.headers['x-scal-s3-version-id'] = vID;
});
return request.send((err, data) => {
if (!err && !s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, 0);
}
return cb(err, data);
});
}
function copyObject(bucket, key, sourceSize, cb) {
return s3Client.copyObject({
Bucket: bucket,
CopySource: `/${bucket}/${key}`,
Key: `${key}-copy`,
}, (err, data) => {
if (!err && !s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, sourceSize);
}
return cb(err, data);
});
}
function deleteObject(bucket, key, size, cb) {
return s3Client.deleteObject({
Bucket: bucket,
Key: key,
}, err => {
if (!err && !s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, -size);
}
assert.ifError(err);
return cb(err);
});
}
function deleteVersionID(bucket, key, versionId, size, cb) {
return s3Client.deleteObject({
Bucket: bucket,
Key: key,
VersionId: versionId,
}, (err, data) => {
if (!err && !s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, -size);
}
return cb(err, data);
});
}
function objectMPU(bucket, key, parts, partSize, callback) {
let ETags = [];
let uploadId = null;
const partNumbers = Array.from(Array(parts).keys());
const initiateMPUParams = {
Bucket: bucket,
Key: key,
};
return async.waterfall([
next => s3Client.createMultipartUpload(initiateMPUParams,
(err, data) => {
if (err) {
return next(err);
}
uploadId = data.UploadId;
return next();
}),
next =>
async.mapLimit(partNumbers, 1, (partNumber, callback) => {
const uploadPartParams = {
Bucket: bucket,
Key: key,
PartNumber: partNumber + 1,
UploadId: uploadId,
Body: Buffer.alloc(partSize),
};
return s3Client.uploadPart(uploadPartParams,
(err, data) => {
if (err) {
return callback(err);
}
return callback(null, data.ETag);
});
}, (err, results) => {
if (err) {
return next(err);
}
ETags = results;
return next();
}),
next => {
const params = {
Bucket: bucket,
Key: key,
MultipartUpload: {
Parts: partNumbers.map(n => ({
ETag: ETags[n],
PartNumber: n + 1,
})),
},
UploadId: uploadId,
};
return s3Client.completeMultipartUpload(params, next);
},
], err => {
if (!err && !s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, parts * partSize);
}
return callback(err, uploadId);
});
}
function abortMPU(bucket, key, uploadId, size, callback) {
return s3Client.abortMultipartUpload({
Bucket: bucket,
Key: key,
UploadId: uploadId,
}, (err, data) => {
if (!err && !s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, -size);
}
return callback(err, data);
});
}
function uploadPartCopy(bucket, key, partNumber, partSize, sleepDuration, keyToCopy, callback) {
const ETags = [];
let uploadId = null;
const parts = 5;
const partNumbers = Array.from(Array(parts).keys());
const initiateMPUParams = {
Bucket: bucket,
Key: key,
};
if (!s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, parts * partSize);
}
return async.waterfall([
next => s3Client.createMultipartUpload(initiateMPUParams,
(err, data) => {
if (err) {
return next(err);
}
uploadId = data.UploadId;
return next();
}),
next => {
const uploadPartParams = {
Bucket: bucket,
Key: key,
PartNumber: partNumber + 1,
UploadId: uploadId,
Body: Buffer.alloc(partSize),
};
return s3Client.uploadPart(uploadPartParams, (err, data) => {
if (err) {
return next(err);
}
ETags[partNumber] = data.ETag;
return next();
});
},
next => wait(sleepDuration, next),
next => {
const copyPartParams = {
Bucket: bucket,
CopySource: `/${bucket}/${keyToCopy}`,
Key: `${key}-copy`,
PartNumber: partNumber + 1,
UploadId: uploadId,
};
return s3Client.uploadPartCopy(copyPartParams, (err, data) => {
if (err) {
return next(err);
}
ETags[partNumber] = data.ETag;
return next(null, data.ETag);
});
},
next => {
const params = {
Bucket: bucket,
Key: key,
MultipartUpload: {
Parts: partNumbers.map(n => ({
ETag: ETags[n],
PartNumber: n + 1,
})),
},
UploadId: uploadId,
};
return s3Client.completeMultipartUpload(params, next);
},
], err => {
if (err && !s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, -(parts * partSize));
}
return callback(err, uploadId);
});
}
function restoreObject(bucket, key, size, callback) {
return s3Client.restoreObject({
Bucket: bucket,
Key: key,
RestoreRequest: {
Days: 1,
},
}, (err, data) => {
if (!err && !s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, size);
}
return callback(err, data);
});
}
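// Multi-object deletes decrement the counter up front and roll the change
// back on error, approximating the negative inflight CloudServer would
// report for the batch.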
function multiObjectDelete(bucket, keys, size, callback) {
if (!s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, -size);
}
return s3Client.deleteObjects({
Bucket: bucket,
Delete: {
Objects: keys.map(key => ({ Key: key })),
},
}, (err, data) => {
if (err && !s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, size);
}
return callback(err, data);
});
}
(process.env.S3METADATA === 'mongodb' ? describe : describe.skip)('quota evaluation with scuba metrics',
function t() {
this.timeout(30000);
const scuba = new MockScuba();
const putQuotaVerb = 'PUT';
const config = {
accessKey: memCredentials.default.accessKey,
secretKey: memCredentials.default.secretKey,
};
mockScuba = scuba;
before(done => {
const config = getConfig('default', { signatureVersion: 'v4', maxRetries: 0 });
s3Client = new S3(config);
scuba.start();
return metadata.setup(err => wait(2000, () => done(err)));
});
afterEach(() => {
scuba.reset();
});
after(() => {
scuba.stop();
});
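// Bucket quotas are set via the `PUT /<bucket>/?quota=true` route used
// throughout this suite; a small wrapper (hypothetical name, added here
// only to make the recurring pattern explicit) would look like:
function setBucketQuota(bucket, quotaBody, credentials) {
    return sendRequest(putQuotaVerb, '127.0.0.1:8000',
        `/${bucket}/?quota=true`, JSON.stringify(quotaBody), credentials);
}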
it('should return QuotaExceeded when trying to PutObject in a bucket with quota', done => {
const bucket = 'quota-test-bucket1';
const key = 'quota-test-object';
const size = 1024;
return async.series([
next => createBucket(bucket, false, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, err => {
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => deleteBucket(bucket, next),
], done);
});
it('should return QuotaExceeded when trying to copyObject in a versioned bucket with quota', done => {
const bucket = 'quota-test-bucket12';
const key = 'quota-test-object';
const size = 900;
let vID = null;
return async.series([
next => createBucket(bucket, false, next),
next => configureBucketVersioning(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, (err, data) => {
assert.ifError(err);
vID = data.VersionId;
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => copyObject(bucket, key, size, err => {
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => deleteVersionID(bucket, key, vID, size, next),
next => deleteBucket(bucket, next),
], done);
});
it('should return QuotaExceeded when trying to CopyObject in a bucket with quota', done => {
const bucket = 'quota-test-bucket2';
const key = 'quota-test-object';
const size = 900;
return async.series([
next => createBucket(bucket, false, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, next),
next => wait(inflightFlushFrequencyMS * 2, next),
next => copyObject(bucket, key, size, err => {
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => deleteObject(bucket, key, size, next),
next => deleteBucket(bucket, next),
], done);
});
it('should return QuotaExceeded when trying to complete MPU in a bucket with quota', done => {
const bucket = 'quota-test-bucket3';
const key = 'quota-test-object';
const parts = 5;
const partSize = 1024 * 1024 * 6;
let uploadId = null;
return async.series([
next => createBucket(bucket, false, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => objectMPU(bucket, key, parts, partSize, (err, _uploadId) => {
uploadId = _uploadId;
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => abortMPU(bucket, key, uploadId, 0, next),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), 0);
return next();
},
next => deleteBucket(bucket, next),
], done);
});
it('should not return QuotaExceeded if the quota is not exceeded', done => {
const bucket = 'quota-test-bucket4';
const key = 'quota-test-object';
const size = 300;
return async.series([
next => createBucket(bucket, false, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, err => {
assert.ifError(err);
return next();
}),
next => deleteObject(bucket, key, size, next),
next => deleteBucket(bucket, next),
], done);
});
it('should not evaluate quotas if the backend is not available', done => {
scuba.stop();
const bucket = 'quota-test-bucket5';
const key = 'quota-test-object';
const size = 1024;
return async.series([
next => createBucket(bucket, false, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, err => {
assert.ifError(err);
return next();
}),
next => deleteObject(bucket, key, size, next),
next => deleteBucket(bucket, next),
], err => {
assert.ifError(err);
scuba.start();
return wait(2000, done);
});
});
it('should return QuotaExceeded when trying to copy a part in a bucket with quota', done => {
const bucket = 'quota-test-bucket6';
const key = 'quota-test-object-copy';
const keyToCopy = 'quota-test-existing';
const parts = 5;
const partSize = 1024 * 1024 * 6;
let uploadId = null;
return async.series([
next => createBucket(bucket, false, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify({ quota: Math.round(partSize * 2.5) }), config)
.then(() => next()).catch(err => next(err)),
next => putObject(bucket, keyToCopy, partSize, next),
next => uploadPartCopy(bucket, key, parts, partSize, inflightFlushFrequencyMS * 2, keyToCopy,
(err, _uploadId) => {
uploadId = _uploadId;
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => abortMPU(bucket, key, uploadId, parts * partSize, next),
next => deleteObject(bucket, keyToCopy, partSize, next),
next => deleteBucket(bucket, next),
], done);
});
it('should return QuotaExceeded when trying to restore an object in a bucket with quota', done => {
const bucket = 'quota-test-bucket7';
const key = 'quota-test-object';
const size = 900;
let vID = null;
return async.series([
next => createBucket(bucket, false, next),
next => configureBucketVersioning(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, (err, data) => {
assert.ifError(err);
vID = data.VersionId;
return next();
}),
next => fakeMetadataArchive(bucket, key, vID, {
archiveInfo: {},
}, next),
next => wait(inflightFlushFrequencyMS * 2, next),
next => restoreObject(bucket, key, size, err => {
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => deleteVersionID(bucket, key, vID, size, next),
next => deleteBucket(bucket, next),
], done);
});
it('should not update the inflights if the quota check is passing but the object is already restored', done => {
const bucket = 'quota-test-bucket14';
const key = 'quota-test-object';
const size = 100;
let vID = null;
return async.series([
next => createBucket(bucket, false, next),
next => configureBucketVersioning(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, (err, data) => {
assert.ifError(err);
vID = data.VersionId;
return next();
}),
next => fakeMetadataArchive(bucket, key, vID, {
archiveInfo: {},
restoreRequestedAt: new Date(0).toString(),
restoreCompletedAt: new Date(0).toString() + 1,
restoreRequestedDays: 5,
}, next),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => restoreObject(bucket, key, 0, next),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => deleteVersionID(bucket, key, vID, size, next),
next => deleteBucket(bucket, next),
], done);
});
it('should allow writes after deleting data with quotas', done => {
const bucket = 'quota-test-bucket8';
const key = 'quota-test-object';
const size = 400;
return async.series([
next => createBucket(bucket, false, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, `${key}1`, size, err => {
assert.ifError(err);
return next();
}),
next => putObject(bucket, `${key}2`, size, err => {
assert.ifError(err);
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => putObject(bucket, `${key}3`, size, err => {
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size * 2);
return next();
},
next => wait(inflightFlushFrequencyMS * 2, next),
next => deleteObject(bucket, `${key}2`, size, next),
next => wait(inflightFlushFrequencyMS * 2, next),
next => putObject(bucket, `${key}4`, size, err => {
assert.ifError(err);
return next();
}),
next => deleteObject(bucket, `${key}1`, size, next),
next => deleteObject(bucket, `${key}3`, size, next),
next => deleteObject(bucket, `${key}4`, size, next),
next => deleteBucket(bucket, next),
], done);
});
it('should not increase the inflights when the object is being rewritten with a smaller object', done => {
const bucket = 'quota-test-bucket9';
const key = 'quota-test-object';
const size = 400;
return async.series([
next => createBucket(bucket, false, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, err => {
assert.ifError(err);
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => putObject(bucket, key, size - 100, err => {
assert.ifError(err);
if (!s3Config.isQuotaInflightEnabled()) {
mockScuba.incrementBytesForBucket(bucket, -size);
}
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size - 100);
return next();
},
next => deleteObject(bucket, key, size, next),
next => deleteBucket(bucket, next),
], done);
});
it('should decrease the inflights when performing multi object delete', done => {
const bucket = 'quota-test-bucket10';
const key = 'quota-test-object';
const size = 400;
return async.series([
next => createBucket(bucket, false, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, `${key}1`, size, err => {
assert.ifError(err);
return next();
}),
next => putObject(bucket, `${key}2`, size, err => {
assert.ifError(err);
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => multiObjectDelete(bucket, [`${key}1`, `${key}2`], size * 2, err => {
assert.ifError(err);
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), 0);
return next();
},
next => deleteBucket(bucket, next),
], done);
});
it('should not update the inflights if the API errored after evaluating quotas (deletion)', done => {
const bucket = 'quota-test-bucket11';
const key = 'quota-test-object';
const size = 100;
let vID = null;
return async.series([
next => createBucket(bucket, true, next),
next => putObjectLockConfiguration(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, (err, val) => {
assert.ifError(err);
vID = val.VersionId;
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => deleteVersionID(bucket, key, vID, size, err => {
assert.strictEqual(err.code, 'AccessDenied');
next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
], done);
});
it('should only evaluate quota and not update inflights for PutObject with the x-scal-s3-version-id header',
done => {
const bucket = 'quota-test-bucket13';
const key = 'quota-test-object';
const size = 100;
let vID = null;
return async.series([
next => createBucket(bucket, true, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, (err, val) => {
assert.ifError(err);
vID = val.VersionId;
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => fakeMetadataArchive(bucket, key, vID, {
archiveInfo: {},
restoreRequestedAt: new Date(0).toISOString(),
restoreRequestedDays: 7,
}, next),
// Simulate the real restore
next => putObjectWithCustomHeader(bucket, key, size, vID, err => {
assert.ifError(err);
return next();
}),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => deleteVersionID(bucket, key, vID, size, next),
next => deleteBucket(bucket, next),
], done);
});
it('should allow a restore if the quota is full but the object fits with its reserved storage space',
done => {
const bucket = 'quota-test-bucket15';
const key = 'quota-test-object';
const size = 1000;
let vID = null;
return async.series([
next => createBucket(bucket, true, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, (err, val) => {
assert.ifError(err);
vID = val.VersionId;
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => fakeMetadataArchive(bucket, key, vID, {
archiveInfo: {},
restoreRequestedAt: new Date(0).toISOString(),
restoreRequestedDays: 7,
}, next),
// Put an object, the quota should be exceeded
next => putObject(bucket, `${key}-2`, size, err => {
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
// Simulate the real restore
next => putObjectWithCustomHeader(bucket, key, size, vID, err => {
assert.ifError(err);
return next();
}),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => deleteVersionID(bucket, key, vID, size, next),
next => deleteBucket(bucket, next),
], done);
});
});
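// The MockScuba helper (tests/utilities/mock/Scuba) is assumed to expose
// roughly this surface, inferred from its use above: start()/stop() and
// reset() for lifecycle, getInflightsForBucket(bucket) to read the bytes
// currently accounted to a bucket, incrementBytesForBucket(bucket, delta)
// for manual adjustments, plus the exported inflightFlushFrequencyMS
// flush interval.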

tests/scuba/awsNodeSdk.js (new file, 479 lines)
View File

@ -0,0 +1,479 @@
const async = require('async');
const assert = require('assert');
const { S3 } = require('aws-sdk');
const getConfig = require('../functional/aws-node-sdk/test/support/config');
const { Scuba: MockScuba, inflightFlushFrequencyMS } = require('../utilities/mock/Scuba');
const sendRequest = require('../functional/aws-node-sdk/test/quota/tooling').sendRequest;
const memCredentials = require('../functional/aws-node-sdk/lib/json/mem_credentials.json');
let s3Client = null;
const quota = { quota: 1000 };
function wait(timeoutMs, cb) {
setTimeout(cb, timeoutMs);
}
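// Unlike the quota suite, this file always waits between steps: it
// exercises the path where CloudServer itself reports inflight bytes to
// the mock backend.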
function createBucket(bucket, cb) {
return s3Client.createBucket({
Bucket: bucket,
}, (err, data) => {
assert.ifError(err);
return cb(err, data);
});
}
function deleteBucket(bucket, cb) {
return s3Client.deleteBucket({
Bucket: bucket,
}, err => {
assert.ifError(err);
return cb(err);
});
}
function putObject(bucket, key, size, cb) {
return s3Client.putObject({
Bucket: bucket,
Key: key,
Body: Buffer.alloc(size),
}, cb);
}
function copyObject(bucket, key, cb) {
return s3Client.copyObject({
Bucket: bucket,
CopySource: `/${bucket}/${key}`,
Key: `${key}-copy`,
}, cb);
}
function deleteObject(bucket, key, cb) {
return s3Client.deleteObject({
Bucket: bucket,
Key: key,
}, err => {
assert.ifError(err);
return cb(err);
});
}
function objectMPU(bucket, key, parts, partSize, callback) {
let ETags = [];
let uploadId = null;
const partNumbers = Array.from(Array(parts).keys());
const initiateMPUParams = {
Bucket: bucket,
Key: key,
};
return async.waterfall([
next => s3Client.createMultipartUpload(initiateMPUParams,
(err, data) => {
if (err) {
return next(err);
}
uploadId = data.UploadId;
return next();
}),
next =>
async.mapLimit(partNumbers, 1, (partNumber, callback) => {
const uploadPartParams = {
Bucket: bucket,
Key: key,
PartNumber: partNumber + 1,
UploadId: uploadId,
Body: Buffer.alloc(partSize),
};
return s3Client.uploadPart(uploadPartParams,
(err, data) => {
if (err) {
return callback(err);
}
return callback(null, data.ETag);
});
}, (err, results) => {
if (err) {
return next(err);
}
ETags = results;
return next();
}),
next => {
const params = {
Bucket: bucket,
Key: key,
MultipartUpload: {
Parts: partNumbers.map(n => ({
ETag: ETags[n],
PartNumber: n + 1,
})),
},
UploadId: uploadId,
};
return s3Client.completeMultipartUpload(params, next);
},
], err => callback(err, uploadId));
}
function abortMPU(bucket, key, uploadId, callback) {
return s3Client.abortMultipartUpload({
Bucket: bucket,
Key: key,
UploadId: uploadId,
}, callback);
}
function uploadPartCopy(bucket, key, partNumber, partSize, sleepDuration, keyToCopy, callback) {
const ETags = [];
let uploadId = null;
const parts = 5;
const partNumbers = Array.from(Array(parts).keys());
const initiateMPUParams = {
Bucket: bucket,
Key: key,
};
return async.waterfall([
next => s3Client.createMultipartUpload(initiateMPUParams,
(err, data) => {
if (err) {
return next(err);
}
uploadId = data.UploadId;
return next();
}),
next => {
const uploadPartParams = {
Bucket: bucket,
Key: key,
PartNumber: partNumber + 1,
UploadId: uploadId,
Body: Buffer.alloc(partSize),
};
return s3Client.uploadPart(uploadPartParams, (err, data) => {
if (err) {
return next(err);
}
ETags[partNumber] = data.ETag;
return next();
});
},
next => wait(sleepDuration, next),
next => {
const copyPartParams = {
Bucket: bucket,
CopySource: `/${bucket}/${keyToCopy}`,
Key: `${key}-copy`,
PartNumber: partNumber + 1,
UploadId: uploadId,
};
return s3Client.uploadPartCopy(copyPartParams, (err, data) => {
if (err) {
return next(err);
}
ETags[partNumber] = data.ETag;
return next(null, data.ETag);
});
},
next => {
const params = {
Bucket: bucket,
Key: key,
MultipartUpload: {
Parts: partNumbers.map(n => ({
ETag: ETags[n],
PartNumber: n + 1,
})),
},
UploadId: uploadId,
};
return s3Client.completeMultipartUpload(params, next);
},
], err => callback(err, uploadId));
}
function restoreObject(bucket, key, callback) {
return s3Client.restoreObject({
Bucket: bucket,
Key: key,
RestoreRequest: {
Days: 1,
},
}, callback);
}
function multiObjectDelete(bucket, keys, callback) {
return s3Client.deleteObjects({
Bucket: bucket,
Delete: {
Objects: keys.map(key => ({ Key: key })),
},
}, callback);
}
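// No manual counter adjustments here: these tests rely on CloudServer
// reporting inflight bytes to the mock Scuba backend on its own.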
describe('quota evaluation with scuba metrics', function t() {
this.timeout(30000);
const scuba = new MockScuba();
const putQuotaVerb = 'PUT';
const config = {
accessKey: memCredentials.default.accessKey,
secretKey: memCredentials.default.secretKey,
};
before(done => {
const config = getConfig('default', { signatureVersion: 'v4' });
s3Client = new S3(config);
scuba.start();
return wait(2000, done);
});
afterEach(() => {
scuba.reset();
});
after(() => {
scuba.stop();
});
it('should return QuotaExceeded when trying to PutObject in a bucket with quota', done => {
const bucket = 'quota-test-bucket1';
const key = 'quota-test-object';
const size = 1024;
return async.series([
next => createBucket(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, err => {
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => deleteBucket(bucket, next),
], done);
});
it('should return QuotaExceeded when trying to CopyObject in a bucket with quota', done => {
const bucket = 'quota-test-bucket2';
const key = 'quota-test-object';
const size = 900;
return async.series([
next => createBucket(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, next),
next => wait(inflightFlushFrequencyMS * 2, next),
next => copyObject(bucket, key, err => {
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => deleteObject(bucket, key, next),
next => deleteBucket(bucket, next),
], done);
});
it('should return QuotaExceeded when trying to complete MPU in a bucket with quota', done => {
const bucket = 'quota-test-bucket3';
const key = 'quota-test-object';
const parts = 5;
const partSize = 1024 * 1024 * 6;
let uploadId = null;
return async.series([
next => createBucket(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => objectMPU(bucket, key, parts, partSize, (err, _uploadId) => {
uploadId = _uploadId;
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => abortMPU(bucket, key, uploadId, next),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), 0);
return next();
},
next => deleteBucket(bucket, next),
], done);
});
it('should not return QuotaExceeded if the quota is not exceeded', done => {
const bucket = 'quota-test-bucket4';
const key = 'quota-test-object';
const size = 300;
return async.series([
next => createBucket(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, err => {
assert.ifError(err);
return next();
}),
next => deleteObject(bucket, key, next),
next => deleteBucket(bucket, next),
], done);
});
it('should not evaluate quotas if the backend is not available', done => {
scuba.stop();
const bucket = 'quota-test-bucket5';
const key = 'quota-test-object';
const size = 1024;
return async.series([
next => createBucket(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, err => {
assert.ifError(err);
return next();
}),
next => deleteObject(bucket, key, next),
next => deleteBucket(bucket, next),
], err => {
assert.ifError(err);
scuba.start();
return wait(2000, done);
});
});
it('should return QuotaExceeded when trying to copy a part in a bucket with quota', done => {
const bucket = 'quota-test-bucket6';
const key = 'quota-test-object-copy';
const keyToCopy = 'quota-test-existing';
const parts = 5;
const partSize = 1024 * 1024 * 6;
let uploadId = null;
return async.series([
next => createBucket(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify({ quota: Math.round(partSize * 2.5) }), config)
.then(() => next()).catch(err => next(err)),
next => putObject(bucket, keyToCopy, partSize, next),
next => uploadPartCopy(bucket, key, parts, partSize, inflightFlushFrequencyMS * 2, keyToCopy,
(err, _uploadId) => {
uploadId = _uploadId;
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => abortMPU(bucket, key, uploadId, next),
next => deleteObject(bucket, keyToCopy, next),
next => deleteBucket(bucket, next),
], done);
});
it('should return QuotaExceeded when trying to restore an object in a bucket with quota', done => {
const bucket = 'quota-test-bucket7';
const key = 'quota-test-object';
const size = 900;
return async.series([
next => createBucket(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, err => {
assert.ifError(err);
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => restoreObject(bucket, key, err => {
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => deleteObject(bucket, key, next),
next => deleteBucket(bucket, next),
], done);
});
it('should allow writes after deleting data with quotas', done => {
const bucket = 'quota-test-bucket8';
const key = 'quota-test-object';
const size = 400;
return async.series([
next => createBucket(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, `${key}1`, size, err => {
assert.ifError(err);
return next();
}),
next => putObject(bucket, `${key}2`, size, err => {
assert.ifError(err);
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => putObject(bucket, `${key}3`, size, err => {
assert.strictEqual(err.code, 'QuotaExceeded');
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size * 2);
return next();
},
next => wait(inflightFlushFrequencyMS * 2, next),
next => deleteObject(bucket, `${key}2`, next),
next => wait(inflightFlushFrequencyMS * 2, next),
next => putObject(bucket, `${key}4`, size, err => {
assert.ifError(err);
return next();
}),
next => deleteObject(bucket, `${key}1`, next),
next => deleteObject(bucket, `${key}3`, next),
next => deleteObject(bucket, `${key}4`, next),
next => deleteBucket(bucket, next),
], done);
});
it('should not increase the inflights when the object is being rewritten with a smaller object', done => {
const bucket = 'quota-test-bucket9';
const key = 'quota-test-object';
const size = 400;
return async.series([
next => createBucket(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, key, size, err => {
assert.ifError(err);
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => putObject(bucket, key, size - 100, err => {
assert.ifError(err);
return next();
}),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), size);
return next();
},
next => deleteObject(bucket, key, next),
next => deleteBucket(bucket, next),
], done);
});
it('should decrease the inflights when performing multi object delete', done => {
const bucket = 'quota-test-bucket10';
const key = 'quota-test-object';
const size = 400;
return async.series([
next => createBucket(bucket, next),
next => sendRequest(putQuotaVerb, '127.0.0.1:8000', `/${bucket}/?quota=true`,
JSON.stringify(quota), config).then(() => next()).catch(err => next(err)),
next => putObject(bucket, `${key}1`, size, err => {
assert.ifError(err);
return next();
}),
next => putObject(bucket, `${key}2`, size, err => {
assert.ifError(err);
return next();
}),
next => wait(inflightFlushFrequencyMS * 2, next),
next => multiObjectDelete(bucket, [`${key}1`, `${key}2`], err => {
assert.ifError(err);
return next();
}),
next => {
assert.strictEqual(scuba.getInflightsForBucket(bucket), 0);
return next();
},
next => deleteBucket(bucket, next),
], done);
});
});

View File

@ -393,65 +393,6 @@ describe('Config', () => {
});
});
describe('quota option setup', () => {
let oldConfig;
before(() => {
oldConfig = process.env.S3_CONFIG_FILE;
process.env.S3_CONFIG_FILE =
'tests/unit/testConfigs/allOptsConfig/config.json';
});
after(() => {
process.env.S3_CONFIG_FILE = oldConfig;
});
it('should set up quota', () => {
const { ConfigObject } = require('../../lib/Config');
const config = new ConfigObject();
assert.deepStrictEqual(
config.quota,
{
maxStaleness: 24 * 60 * 60 * 1000,
enableInflights: false,
},
);
});
it('should use environment variables for quota', () => {
setEnv('QUOTA_MAX_STALENESS_MS', 1234);
setEnv('QUOTA_ENABLE_INFLIGHTS', 'true');
const { ConfigObject } = require('../../lib/Config');
const config = new ConfigObject();
assert.deepStrictEqual(
config.quota,
{
maxStaleness: 1234,
enableInflights: true,
},
);
});
it('should use the default if the maxStaleness is not a number', () => {
setEnv('QUOTA_MAX_STALENESS_MS', 'notanumber');
setEnv('QUOTA_ENABLE_INFLIGHTS', 'true');
const { ConfigObject } = require('../../lib/Config');
const config = new ConfigObject();
assert.deepStrictEqual(
config.quota,
{
maxStaleness: 24 * 60 * 60 * 1000,
enableInflights: true,
},
);
});
});
describe('utapi option setup', () => {
let oldConfig;

View File

@ -15,15 +15,13 @@ const sourceObject = 'objectsource';
const sourceVersionId = 'vid1';
describe('prepareRequestContexts', () => {
it('should return s3:DeleteObject if multiObjectDelete method', () => {
it('should return null if multiObjectDelete method', () => {
const apiMethod = 'multiObjectDelete';
const request = makeRequest();
const results = prepareRequestContexts(apiMethod, request, sourceBucket,
sourceObject, sourceVersionId);
assert.strictEqual(results.length, 1);
const expectedAction = 's3:DeleteObject';
assert.strictEqual(results[0].getAction(), expectedAction);
assert.strictEqual(results, null);
});
it('should return s3:PutObjectVersion request context action for objectPut method with x-scal-s3-version-id' +

View File

@ -1,623 +0,0 @@
const sinon = require('sinon');
const assert = require('assert');
const { config } = require('../../../../../lib/Config');
const {
validateQuotas,
processBytesToWrite,
isMetricStale,
} = require('../../../../../lib/api/apiUtils/quotas/quotaUtils');
const QuotaService = require('../../../../../lib/quotas/quotas');
const mockLog = {
warn: sinon.stub(),
debug: sinon.stub(),
};
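// Signature exercised below (as used by these tests):
// validateQuotas(request, bucket, accountQuota, apiNames, apiMethod,
//     inflight, isStorageReserved, log, callback)
// `inflight` is a byte delta (number), `true` for evaluate-only actions
// such as restores, or undefined/0 when no bytes should be reserved.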
const mockBucket = {
getQuota: () => 100,
getName: () => 'bucketName',
getCreationDate: () => '2022-01-01T00:00:00.000Z',
};
const mockBucketNoQuota = {
getQuota: () => 0,
getName: () => 'bucketName',
getCreationDate: () => '2022-01-01T00:00:00.000Z',
};
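// Bucket metrics are keyed as `<name>_<creationDateMs>`:
// 2022-01-01T00:00:00.000Z is 1640995200000 ms since the epoch, which is
// why the tests assert on 'bucketName_1640995200000'.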
describe('validateQuotas (buckets)', () => {
const request = {
getQuota: () => 100,
};
beforeEach(() => {
config.scuba = {
host: 'localhost',
port: 8080,
};
config.quota = {
maxStaleness: 24 * 60 * 60 * 1000,
enableInflights: true,
};
config.isQuotaEnabled = sinon.stub().returns(true);
QuotaService.enabled = true;
QuotaService._getLatestMetricsCallback = sinon.stub().resolves({});
request.finalizerHooks = [];
});
afterEach(() => {
sinon.restore();
});
it('should return null if quota is <= 0', done => {
validateQuotas(request, mockBucketNoQuota, {}, [], '', false, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
done();
});
});
it('should return null if scuba is disabled', done => {
QuotaService.enabled = false;
validateQuotas(request, mockBucket, {}, [], '', false, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
done();
});
});
it('should return null if metrics retrieval fails', done => {
QuotaService.enabled = true;
const error = new Error('Failed to get metrics');
QuotaService._getLatestMetricsCallback.yields(error);
validateQuotas(request, mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectPut',
inflight: 1,
}
), true);
done();
});
});
it('should return errors.QuotaExceeded if quota is exceeded', done => {
const result1 = {
bytesTotal: 150,
};
const result2 = {
bytesTotal: 120,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
assert.strictEqual(err.is.QuotaExceeded, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1);
assert.strictEqual(request.finalizerHooks.length, 1);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectPut',
inflight: 1,
}
), true);
done();
});
});
it('should not return QuotaExceeded if the quotas are exceeded but operation is a delete', done => {
const result1 = {
bytesTotal: 150,
};
const result2 = {
bytesTotal: 120,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {}, ['objectDelete'], 'objectDelete', -50, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectDelete',
inflight: -50,
}
), true);
done();
});
});
it('should return null if quota is not exceeded', done => {
const result1 = {
bytesTotal: 80,
};
const result2 = {
bytesTotal: 90,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {}, ['objectRestore', 'objectPut'], 'objectRestore',
true, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectRestore',
inflight: true,
}
), true);
done();
});
});
it('should not include the inflights in the request if they are disabled', done => {
config.quota.enableInflights = false;
const result1 = {
bytesTotal: 80,
};
const result2 = {
bytesTotal: 90,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {}, ['objectRestore', 'objectPut'], 'objectRestore',
true, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectRestore',
inflight: undefined,
}
), true);
done();
});
});
it('should evaluate the quotas and not update the inflights when isStorageReserved is true', done => {
const result1 = {
bytesTotal: 80,
};
const result2 = {
bytesTotal: 90,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {}, ['objectPut'], 'objectPut',
true, true, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectPut',
inflight: 0,
}
), true);
done();
});
});
});
describe('validateQuotas (with accounts)', () => {
const request = {
getQuota: () => 100,
};
beforeEach(() => {
config.scuba = {
host: 'localhost',
port: 8080,
};
config.quota = {
maxStaleness: 24 * 60 * 60 * 1000,
enableInflights: true,
};
request.finalizerHooks = [];
config.isQuotaEnabled = sinon.stub().returns(true);
QuotaService.enabled = true;
QuotaService._getLatestMetricsCallback = sinon.stub().resolves({});
});
afterEach(() => {
sinon.restore();
});
it('should return null if quota is <= 0', done => {
validateQuotas(request, mockBucketNoQuota, {
account: 'test_1',
quota: 0,
}, [], '', false, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
done();
});
});
it('should not return null if bucket quota is <= 0 but account quota is > 0', done => {
validateQuotas(request, mockBucketNoQuota, {
account: 'test_1',
quota: 1000,
}, [], '', false, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
done();
});
});
it('should return null if scuba is disabled', done => {
QuotaService.enabled = false;
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, [], '', false, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.called, false);
done();
});
});
it('should return null if metrics retrieval fails', done => {
QuotaService.enabled = true;
const error = new Error('Failed to get metrics');
QuotaService._getLatestMetricsCallback.yields(error);
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledOnce, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectPut',
inflight: 1,
}
), true);
done();
});
});
it('should return errors.QuotaExceeded if quota is exceeded', done => {
const result1 = {
bytesTotal: 150,
};
const result2 = {
bytesTotal: 120,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucketNoQuota, {
account: 'test_1',
quota: 100,
}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
assert.strictEqual(err.is.QuotaExceeded, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1);
assert.strictEqual(request.finalizerHooks.length, 1);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'account',
'test_1',
null,
{
action: 'objectPut',
inflight: 1,
}
), true);
done();
});
});
it('should not return QuotaExceeded if the quotas are exceeded but operation is a delete', done => {
const result1 = {
bytesTotal: 150,
};
const result2 = {
bytesTotal: 120,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucketNoQuota, {
account: 'test_1',
quota: 1000,
}, ['objectDelete'], 'objectDelete', -50, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 1);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'account',
'test_1',
null,
{
action: 'objectDelete',
inflight: -50,
}
), true);
done();
});
});
it('should return null if quota is not exceeded', done => {
const result1 = {
bytesTotal: 80,
};
const result2 = {
bytesTotal: 90,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectRestore', 'objectPut'], 'objectRestore', true, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 4);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'account',
'test_1',
null,
{
action: 'objectRestore',
inflight: true,
}
), true);
done();
});
});
it('should return quota exceeded if account and bucket quotas are different', done => {
const result1 = {
bytesTotal: 150,
};
const result2 = {
bytesTotal: 120,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectPut', 'getObject'], 'objectPut', 1, false, mockLog, err => {
assert.strictEqual(err.is.QuotaExceeded, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 2);
assert.strictEqual(request.finalizerHooks.length, 1);
done();
});
});
it('should update the request with one function per action to clear quota updates', done => {
const result1 = {
bytesTotal: 80,
};
const result2 = {
bytesTotal: 90,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectRestore', 'objectPut'], 'objectRestore', true, false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.callCount, 4);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'account',
'test_1',
null,
{
action: 'objectRestore',
inflight: true,
}
), true);
done();
});
});
it('should evaluate the quotas and not update the inflights when isStorageReserved is true', done => {
const result1 = {
bytesTotal: 80,
};
const result2 = {
bytesTotal: 90,
};
QuotaService._getLatestMetricsCallback.yields(null, result1);
QuotaService._getLatestMetricsCallback.onCall(1).yields(null, result2);
validateQuotas(request, mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectPut'], 'objectPut', true, true, mockLog, err => {
assert.ifError(err);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledTwice, true);
assert.strictEqual(QuotaService._getLatestMetricsCallback.calledWith(
'account',
'test_1',
null,
{
action: 'objectPut',
inflight: 0,
}
), true);
done();
});
});
});
describe('processBytesToWrite', () => {
let bucket;
let versionId;
let contentLength;
let objMD;
beforeEach(() => {
bucket = {
isVersioningEnabled: sinon.stub(),
};
versionId = '';
contentLength = 0;
objMD = null;
});
it('should return a negative number if the operation is a delete and bucket is not versioned', () => {
bucket.isVersioningEnabled.returns(false);
objMD = { 'content-length': 100 };
const bytes = processBytesToWrite('objectDelete', bucket, versionId, contentLength, objMD);
assert.strictEqual(bytes, -100);
});
it('should return 0 if the operation is a delete and bucket is versioned', () => {
bucket.isVersioningEnabled.returns(true);
objMD = { 'content-length': 100 };
const bytes = processBytesToWrite('objectDelete', bucket, versionId, contentLength, objMD);
assert.strictEqual(bytes, 0);
});
it('should return a negative number for a versioned bucket with a versionid deletion', () => {
bucket.isVersioningEnabled.returns(true);
objMD = { 'content-length': 100 };
versionId = 'versionId';
const bytes = processBytesToWrite('objectDelete', bucket, versionId, contentLength, objMD);
assert.strictEqual(bytes, -100);
});
it('should return 0 for a delete operation if the object metadata is missing', () => {
bucket.isVersioningEnabled.returns(true);
objMD = null;
const bytes = processBytesToWrite('objectDelete', bucket, versionId, contentLength, objMD);
assert.strictEqual(bytes, 0);
});
it('should return the object metadata content length for a restore object operation', () => {
bucket.isVersioningEnabled.returns(true);
objMD = { 'content-length': 100 };
contentLength = 150;
const bytes = processBytesToWrite('objectRestore', bucket, versionId, contentLength, objMD);
assert.strictEqual(bytes, 100);
});
it('should return the difference of the content length if the object is being replaced', () => {
bucket.isVersioningEnabled.returns(false);
objMD = { 'content-length': 100 };
contentLength = 150;
const bytes = processBytesToWrite('objectPut', bucket, versionId, contentLength, objMD);
assert.strictEqual(bytes, 50);
});
it('should return content length if the object is being replaced and the bucket is versioned', () => {
bucket.isVersioningEnabled.returns(true);
objMD = { 'content-length': 100 };
contentLength = 150;
const bytes = processBytesToWrite('objectPut', bucket, versionId, contentLength, objMD);
assert.strictEqual(bytes, contentLength);
});
it('should return content length of the object metadata if the action is a copy (getObject authz)', () => {
bucket.isVersioningEnabled.returns(true);
objMD = { 'content-length': 100 };
const bytes = processBytesToWrite('objectCopy', bucket, versionId, 0, objMD);
assert.strictEqual(bytes, 100);
});
it('should return content length of the object metadata if the action is a copy part (getObject authz)', () => {
bucket.isVersioningEnabled.returns(true);
objMD = { 'content-length': 100 };
const bytes = processBytesToWrite('objectPutCopyPart', bucket, versionId, 0, objMD);
assert.strictEqual(bytes, 100);
});
it('should detect object replacement during copy object operation on a non versioned bucket', () => {
bucket.isVersioningEnabled.returns(false);
objMD = { 'content-length': 100 };
const destObjMD = { 'content-length': 20 };
const bytes = processBytesToWrite('objectCopy', bucket, versionId, 0, objMD, destObjMD);
assert.strictEqual(bytes, 80);
});
it('should not detect object replacement during copy object operation if the bucket is versioned', () => {
bucket.isVersioningEnabled.returns(true);
objMD = { 'content-length': 100 };
const destObjMD = { 'content-length': 20 };
const bytes = processBytesToWrite('objectCopy', bucket, versionId, 0, objMD, destObjMD);
assert.strictEqual(bytes, 100);
});
});
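Taken together, the cases above fully determine the sign and size of the byte delta. A minimal sketch consistent with these tests (parameter order follows the test calls; this is not the shipped implementation):

function processBytesToWriteSketch(apiMethod, bucket, versionId, contentLength, objMD, destObjMD) {
    if (apiMethod === 'objectDelete') {
        // a delete needs object metadata to reclaim anything; on a versioned
        // bucket only a targeted (versionId) delete actually frees bytes
        if (!objMD) {
            return 0;
        }
        if (!bucket.isVersioningEnabled() || versionId) {
            return -Number(objMD['content-length'] || 0);
        }
        return 0;
    }
    if (['objectRestore', 'objectCopy', 'objectPutCopyPart'].includes(apiMethod)) {
        // these actions are sized from the source object metadata
        let bytes = Number(objMD ? objMD['content-length'] : 0) || 0;
        // replacing the copy destination only frees bytes on a non-versioned bucket
        if (!bucket.isVersioningEnabled() && destObjMD) {
            bytes -= Number(destObjMD['content-length'] || 0);
        }
        return bytes;
    }
    // objectPut and similar writes: overwriting on a non-versioned bucket
    // frees the bytes of the object being replaced
    let bytes = contentLength;
    if (!bucket.isVersioningEnabled() && objMD) {
        bytes -= Number(objMD['content-length'] || 0);
    }
    return bytes;
}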
describe('isMetricStale', () => {
const metric = {
date: new Date().toISOString(),
};
const resourceType = 'bucket';
const resourceName = 'bucketName';
const action = 'objectPut';
const inflight = 1;
const log = {
warn: sinon.stub(),
};
it('should return false if the metric is not stale', () => {
const result = isMetricStale(metric, resourceType, resourceName, action, inflight, log);
assert.strictEqual(result, false);
assert.strictEqual(log.warn.called, false);
});
it('should return true and log a warning if the metric is stale', () => {
const staleDate = new Date(Date.now() - 24 * 60 * 60 * 1000 - 1);
metric.date = staleDate.toISOString();
const result = isMetricStale(metric, resourceType, resourceName, action, inflight, log);
assert.strictEqual(result, true);
assert.strictEqual(log.warn.calledOnce, true);
});
});
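The staleness boundary exercised above sits exactly at 24 hours. A sketch of a check consistent with these two cases (the threshold constant is an assumption read off the test's 24-hour offset):

const MAX_METRIC_STALENESS_MS = 24 * 60 * 60 * 1000; // assumed threshold
function isMetricStaleSketch(metric, resourceType, resourceName, action, inflight, log) {
    if (new Date(metric.date).getTime() < Date.now() - MAX_METRIC_STALENESS_MS) {
        log.warn('stale quota metrics, allowing the request', {
            resourceType, resourceName, action, inflight,
        });
        return true;
    }
    return false;
}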

View File

@ -1,4 +1,3 @@
const crypto = require('crypto');
const assert = require('assert');
const { errors, storage } = require('arsenal');
@ -8,7 +7,6 @@ const multiObjectDelete = require('../../../lib/api/multiObjectDelete');
const { cleanup, DummyRequestLogger, makeAuthInfo } = require('../helpers');
const DummyRequest = require('../DummyRequest');
const { bucketPut } = require('../../../lib/api/bucketPut');
const metadataWrapper = require('../../../lib/metadata/wrapper');
const objectPut = require('../../../lib/api/objectPut');
const log = new DummyRequestLogger();
@ -27,7 +25,6 @@ const objectKey1 = 'objectName1';
const objectKey2 = 'objectName2';
const metadataUtils = require('../../../lib/metadata/metadataUtils');
const services = require('../../../lib/services');
const { BucketInfo } = require('arsenal/build/lib/models');
const testBucketPutRequest = new DummyRequest({
bucketName,
namespace,
@ -360,43 +357,3 @@ describe('decodeObjectVersion function helper', () => {
assert.deepStrictEqual(ret[1], undefined);
});
});
describe('multiObjectDelete function', () => {
afterEach(() => {
sinon.restore();
});
it('should not grant object access based solely on bucket and initial IAM authorization results', done => {
const post = '<Delete><Object><Key>objectname</Key></Object></Delete>';
const request = new DummyRequest({
bucketName: 'bucketname',
objectKey: 'objectname',
parsedHost: 'localhost',
headers: {
'content-md5': crypto.createHash('md5').update(post, 'utf8').digest('base64')
},
post,
url: '/bucketname',
});
const authInfo = makeAuthInfo('123456');
sinon.stub(metadataWrapper, 'getBucket').callsFake((bucketName, log, cb) =>
cb(null, new BucketInfo(
'bucketname',
'123456',
'accountA',
new Date().toISOString(),
15,
)));
multiObjectDelete.multiObjectDelete(authInfo, request, log, (err, res) => {
// Expected result is an access denied on the object, and no error, as the API was authorized
assert.strictEqual(err, null);
assert.strictEqual(
res.includes('<Error><Key>objectname</Key><Code>AccessDenied</Code>'),
true
);
done();
});
});
});

View File

@ -1,13 +1,33 @@
const sinon = require('sinon');
const assert = require('assert');
const {
checkBucketAcls,
checkObjectAcls,
validatePolicyConditions,
validateQuotas,
} = require('../../../lib/api/apiUtils/authorization/permissionChecks');
const constants = require('../../../constants');
const { ScubaClientInstance } = require('../../../lib/scuba/wrapper');
const { bucketOwnerActions, logId } = constants;
const mockBucket = {
getQuota: () => 100,
getName: () => 'bucketName',
getCreationDate: () => '2022-01-01T00:00:00.000Z',
};
const mockBucketNoQuota = {
getQuota: () => 0,
getName: () => 'bucketName',
getCreationDate: () => '2022-01-01T00:00:00.000Z',
};
const mockLog = {
warn: sinon.stub(),
debug: sinon.stub(),
};
describe('checkBucketAcls', () => {
const mockBucket = {
getOwner: () => 'ownerId',
@ -534,3 +554,307 @@ describe('validatePolicyConditions', () => {
});
});
});
describe('validateQuotas (buckets)', () => {
beforeEach(() => {
ScubaClientInstance.enabled = true;
ScubaClientInstance.getLatestMetrics = sinon.stub().resolves({});
});
afterEach(() => {
sinon.restore();
});
it('should return null if quota is <= 0 or scuba is disabled', done => {
validateQuotas(mockBucketNoQuota, {}, [], '', false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.called, false);
done();
});
});
it('should return null if scuba is disabled', done => {
ScubaClientInstance.enabled = false;
validateQuotas(mockBucket, {}, [], '', false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.called, false);
done();
});
});
it('should return null if metrics retrieval fails', done => {
ScubaClientInstance.enabled = true;
const error = new Error('Failed to get metrics');
ScubaClientInstance.getLatestMetrics.rejects(error);
validateQuotas(mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
assert.ifError(err);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledOnce, true);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectPut',
inflight: 1,
}
), true);
done();
});
});
it('should return errors.QuotaExceeded if quota is exceeded', done => {
const result1 = {
bytesTotal: 150,
};
const result2 = {
bytesTotal: 120,
};
ScubaClientInstance.getLatestMetrics.onCall(0).resolves(result1);
ScubaClientInstance.getLatestMetrics.onCall(1).resolves(result2);
validateQuotas(mockBucket, {}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
assert.strictEqual(err.is.QuotaExceeded, true);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledTwice, true);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectPut',
inflight: 1,
}
), true);
done();
});
});
it('should not return QuotaExceeded if the quotas are exceeded but operation is a delete', done => {
const result1 = {
bytesTotal: 150,
};
const result2 = {
bytesTotal: 120,
};
ScubaClientInstance.getLatestMetrics.onCall(0).resolves(result1);
ScubaClientInstance.getLatestMetrics.onCall(1).resolves(result2);
validateQuotas(mockBucket, {}, ['objectDelete'], 'objectDelete', -50, mockLog, err => {
assert.ifError(err);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledOnce, true);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectDelete',
inflight: -50,
}
), true);
done();
});
});
it('should return null if quota is not exceeded', done => {
const result1 = {
bytesTotal: 80,
};
const result2 = {
bytesTotal: 90,
};
ScubaClientInstance.getLatestMetrics.onCall(0).resolves(result1);
ScubaClientInstance.getLatestMetrics.onCall(1).resolves(result2);
validateQuotas(mockBucket, {}, ['objectRestore', 'objectPut'], 'objectRestore',
true, mockLog, err => {
assert.ifError(err);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledTwice, true);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectRestore',
inflight: true,
}
), true);
done();
});
});
});
describe('validateQuotas (with accounts)', () => {
beforeEach(() => {
ScubaClientInstance.enabled = true;
ScubaClientInstance.getLatestMetrics = sinon.stub().resolves({});
});
afterEach(() => {
sinon.restore();
});
it('should return null if quota is <= 0 or scuba is disabled', done => {
validateQuotas(mockBucketNoQuota, {
account: 'test_1',
quota: 0,
}, [], '', false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.called, false);
done();
});
});
it('should not error if the bucket quota is <= 0 but the account quota is > 0', done => {
validateQuotas(mockBucketNoQuota, {
account: 'test_1',
quota: 1000,
}, [], '', false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.called, false);
done();
});
});
it('should return null if scuba is disabled', done => {
ScubaClientInstance.enabled = false;
validateQuotas(mockBucket, {
account: 'test_1',
quota: 1000,
}, [], '', false, mockLog, err => {
assert.ifError(err);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.called, false);
done();
});
});
it('should return null if metrics retrieval fails', done => {
ScubaClientInstance.enabled = true;
const error = new Error('Failed to get metrics');
ScubaClientInstance.getLatestMetrics.rejects(error);
validateQuotas(mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
assert.ifError(err);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledOnce, true);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectPut',
inflight: 1,
}
), true);
done();
});
});
it('should return errors.QuotaExceeded if quota is exceeded', done => {
const result1 = {
bytesTotal: 150,
};
const result2 = {
bytesTotal: 120,
};
ScubaClientInstance.getLatestMetrics.onCall(0).resolves(result1);
ScubaClientInstance.getLatestMetrics.onCall(1).resolves(result2);
validateQuotas(mockBucket, {
account: 'test_1',
quota: 100,
}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
assert.strictEqual(err.is.QuotaExceeded, true);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.callCount, 4);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectPut',
inflight: 1,
}
), true);
done();
});
});
it('should not return QuotaExceeded if the quotas are exceeded but operation is a delete', done => {
const result1 = {
bytesTotal: 150,
};
const result2 = {
bytesTotal: 120,
};
ScubaClientInstance.getLatestMetrics.onCall(0).resolves(result1);
ScubaClientInstance.getLatestMetrics.onCall(1).resolves(result2);
validateQuotas(mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectDelete'], 'objectDelete', -50, mockLog, err => {
assert.ifError(err);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledTwice, true);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectDelete',
inflight: -50,
}
), true);
done();
});
});
it('should return null if quota is not exceeded', done => {
const result1 = {
bytesTotal: 80,
};
const result2 = {
bytesTotal: 90,
};
ScubaClientInstance.getLatestMetrics.onCall(0).resolves(result1);
ScubaClientInstance.getLatestMetrics.onCall(1).resolves(result2);
validateQuotas(mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectRestore', 'objectPut'], 'objectRestore', true, mockLog, err => {
assert.ifError(err);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.callCount, 4);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.calledWith(
'bucket',
'bucketName_1640995200000',
null,
{
action: 'objectRestore',
inflight: true,
}
), true);
done();
});
});
it('should return QuotaExceeded if the bucket quota is exceeded even when the account quota is not', done => {
const result1 = {
bytesTotal: 150,
};
const result2 = {
bytesTotal: 120,
};
ScubaClientInstance.getLatestMetrics.onCall(0).resolves(result1);
ScubaClientInstance.getLatestMetrics.onCall(1).resolves(result2);
validateQuotas(mockBucket, {
account: 'test_1',
quota: 1000,
}, ['objectPut', 'getObject'], 'objectPut', 1, mockLog, err => {
assert.strictEqual(err.is.QuotaExceeded, true);
assert.strictEqual(ScubaClientInstance.getLatestMetrics.callCount, 4);
done();
});
});
});
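The two suites above pin down the evaluation order: one metrics lookup per API name and per applicable quota (bucket, then account), fail-open on lookup errors, and no blocking for deletes. A rough sketch consistent with that behavior, reusing the names from these tests (not the shipped implementation):

async function validateQuotasSketch(bucket, account, apiNames, apiMethod, inflight, log, callback) {
    const accountQuota = account && account.quota > 0 ? account.quota : 0;
    if ((bucket.getQuota() <= 0 && accountQuota <= 0) || !ScubaClientInstance.enabled) {
        return callback(null); // nothing to enforce, or quota backend disabled
    }
    // bucket metrics are keyed on name plus creation timestamp,
    // e.g. new Date('2022-01-01T00:00:00.000Z').getTime() === 1640995200000
    const bucketKey = `${bucket.getName()}_${new Date(bucket.getCreationDate()).getTime()}`;
    const targets = [];
    apiNames.forEach(() => {
        if (bucket.getQuota() > 0) {
            targets.push({ type: 'bucket', key: bucketKey, quota: bucket.getQuota() });
        }
        if (accountQuota > 0) {
            targets.push({ type: 'account', key: account.account, quota: accountQuota });
        }
    });
    let exceeded = false;
    for (const target of targets) {
        try {
            const metrics = await ScubaClientInstance.getLatestMetrics(
                target.type, target.key, null, { action: apiMethod, inflight });
            // deletes (negative inflight) are never blocked by quota
            if (inflight >= 0 && metrics.bytesTotal > target.quota) {
                exceeded = true;
            }
        } catch (err) {
            log.warn('quota metrics lookup failed, allowing the request', { error: err.message });
            return callback(null); // fail open when metrics are unavailable
        }
    }
    return callback(exceeded ? errors.QuotaExceeded : null); // errors from require('arsenal')
}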

View File

@ -0,0 +1,82 @@
const assert = require('assert');
const {
createWSAgent,
} = require('../../../lib/management/push');
const proxy = 'http://proxy:3128/';
const logger = { info: () => {} };
function testVariableSet(httpProxy, httpsProxy, allProxy, noProxy) {
return () => {
it(`should use ${httpProxy} environment variable`, () => {
let agent = createWSAgent('https://pushserver', {
[httpProxy]: 'http://proxy:3128',
}, logger);
assert.equal(agent, null);
agent = createWSAgent('http://pushserver', {
[httpProxy]: proxy,
}, logger);
assert.equal(agent.proxy.href, proxy);
});
it(`should use ${httpsProxy} environment variable`, () => {
let agent = createWSAgent('http://pushserver', {
[httpsProxy]: proxy,
}, logger);
assert.equal(agent, null);
agent = createWSAgent('https://pushserver', {
[httpsProxy]: proxy,
}, logger);
assert.equal(agent.proxy.href, proxy);
});
it(`should use ${allProxy} environment variable`, () => {
let agent = createWSAgent('http://pushserver', {
[allProxy]: proxy,
}, logger);
assert.equal(agent.proxy.href, proxy);
agent = createWSAgent('https://pushserver', {
[allProxy]: proxy,
}, logger);
assert.equal(agent.proxy.href, proxy);
});
it(`should use ${noProxy} environment variable`, () => {
let agent = createWSAgent('http://pushserver', {
[noProxy]: 'pushserver',
}, logger);
assert.equal(agent, null);
agent = createWSAgent('http://pushserver', {
[noProxy]: 'pushserver',
[httpProxy]: proxy,
}, logger);
assert.equal(agent, null);
agent = createWSAgent('http://pushserver', {
[noProxy]: 'pushserver2',
[httpProxy]: proxy,
}, logger);
assert.equal(agent.proxy.href, proxy);
});
};
}
describe('Websocket connection agent', () => {
describe('with no proxy env', () => {
it('should handle empty proxy environment', () => {
const agent = createWSAgent('https://pushserver', {}, logger);
assert.equal(agent, null);
});
});
describe('with lowercase proxy env',
testVariableSet('http_proxy', 'https_proxy', 'all_proxy', 'no_proxy'));
describe('with uppercase proxy env',
testVariableSet('HTTP_PROXY', 'HTTPS_PROXY', 'ALL_PROXY', 'NO_PROXY'));
});
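These cases encode the usual proxy-variable precedence: no_proxy wins, then the scheme-specific variable, then all_proxy. A minimal sketch of that resolution, assuming an https-proxy-agent-style constructor whose instances expose a parsed .proxy URL:

const { HttpsProxyAgent } = require('https-proxy-agent'); // assumed agent library
function createWSAgentSketch(url, env, logger) {
    const noProxy = env.no_proxy || env.NO_PROXY;
    if (noProxy && noProxy.split(',').some(host => url.includes(host))) {
        return null; // target host is exempted from proxying
    }
    const schemeProxy = url.startsWith('https:')
        ? (env.https_proxy || env.HTTPS_PROXY)
        : (env.http_proxy || env.HTTP_PROXY);
    const proxyUrl = schemeProxy || env.all_proxy || env.ALL_PROXY;
    if (!proxyUrl) {
        return null; // no proxy configured, connect directly
    }
    logger.info('using proxy for push websocket', { proxyUrl });
    return new HttpsProxyAgent(proxyUrl);
}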

View File

@ -0,0 +1,239 @@
const assert = require('assert');
const crypto = require('crypto');
const { DummyRequestLogger } = require('../helpers');
const log = new DummyRequestLogger();
const metadata = require('../../../lib/metadata/wrapper');
const managementDatabaseName = 'PENSIEVE';
const tokenConfigurationKey = 'auth/zenko/remote-management-token';
const { privateKey, accessKey, decryptedSecretKey, secretKey, canonicalId,
userName } = require('./resources.json');
const shortid = '123456789012';
const email = 'customaccount1@setbyenv.com';
const arn = 'arn:aws:iam::123456789012:root';
const { config } = require('../../../lib/Config');
const {
remoteOverlayIsNewer,
patchConfiguration,
} = require('../../../lib/management/configuration');
const {
initManagementDatabase,
} = require('../../../lib/management/index');
function initManagementCredentialsMock(cb) {
return metadata.putObjectMD(managementDatabaseName,
tokenConfigurationKey, { privateKey }, {},
log, error => cb(error));
}
function getConfig() {
return config;
}
// Original Config
const overlayVersionOriginal = Object.assign({}, config.overlayVersion);
const authDataOriginal = Object.assign({}, config.authData);
const locationConstraintsOriginal = Object.assign({},
config.locationConstraints);
const restEndpointsOriginal = Object.assign({}, config.restEndpoints);
const browserAccessEnabledOriginal = config.browserAccessEnabled;
const instanceId = '19683e55-56f7-4a4c-98a7-706c07e4ec30';
const publicInstanceId = crypto.createHash('sha256')
.update(instanceId)
.digest('hex');
function resetConfig() {
config.overlayVersion = overlayVersionOriginal;
config.authData = authDataOriginal;
config.locationConstraints = locationConstraintsOriginal;
config.restEndpoints = restEndpointsOriginal;
config.browserAccessEnabled = browserAccessEnabledOriginal;
}
function assertConfig(actualConf, expectedConf) {
Object.keys(expectedConf).forEach(key => {
assert.deepStrictEqual(actualConf[key], expectedConf[key]);
});
}
describe('patchConfiguration', () => {
before(done => initManagementDatabase(log, err => {
if (err) {
return done(err);
}
return initManagementCredentialsMock(done);
}));
beforeEach(() => {
resetConfig();
});
it('should modify config using the new config', done => {
const newConf = {
version: 1,
instanceId,
users: [
{
secretKey,
accessKey,
canonicalId,
userName,
},
],
endpoints: [
{
hostname: '1.1.1.1',
locationName: 'us-east-1',
},
],
locations: {
'us-east-1': {
name: 'us-east-1',
objectId: 'us-east-1',
locationType: 'location-file-v1',
legacyAwsBehavior: true,
details: {},
},
},
browserAccess: {
enabled: true,
},
};
return patchConfiguration(newConf, log, err => {
assert.ifError(err);
const actualConf = getConfig();
const expectedConf = {
overlayVersion: 1,
publicInstanceId,
browserAccessEnabled: true,
authData: {
accounts: [{
name: userName,
email,
arn,
canonicalID: canonicalId,
shortid,
keys: [{
access: accessKey,
secret: decryptedSecretKey,
}],
}],
},
locationConstraints: {
'us-east-1': {
type: 'file',
objectId: 'us-east-1',
legacyAwsBehavior: true,
isTransient: false,
sizeLimitGB: null,
details: { supportsVersioning: true },
name: 'us-east-1',
locationType: 'location-file-v1',
},
},
};
assertConfig(actualConf, expectedConf);
assert.deepStrictEqual(actualConf.restEndpoints['1.1.1.1'],
'us-east-1');
return done();
});
});
it('should apply second configuration if version (2) is greater than ' +
'overlayVersion (1)', done => {
const newConf1 = {
version: 1,
instanceId,
};
const newConf2 = {
version: 2,
instanceId,
browserAccess: {
enabled: true,
},
};
patchConfiguration(newConf1, log, err => {
assert.ifError(err);
return patchConfiguration(newConf2, log, err => {
assert.ifError(err);
const actualConf = getConfig();
const expectedConf = {
overlayVersion: 2,
browserAccessEnabled: true,
};
assertConfig(actualConf, expectedConf);
return done();
});
});
});
it('should not apply the second configuration if version equals ' +
'overlayVersion', done => {
const newConf1 = {
version: 1,
instanceId,
};
const newConf2 = {
version: 1,
instanceId,
browserAccess: {
enabled: true,
},
};
patchConfiguration(newConf1, log, err => {
assert.ifError(err);
return patchConfiguration(newConf2, log, err => {
assert.ifError(err);
const actualConf = getConfig();
const expectedConf = {
overlayVersion: 1,
browserAccessEnabled: undefined,
};
assertConfig(actualConf, expectedConf);
return done();
});
});
});
});
describe('remoteOverlayIsNewer', () => {
it('should return remoteOverlayIsNewer equals false if remote overlay ' +
'is less than the cached', () => {
const cachedOverlay = {
version: 2,
};
const remoteOverlay = {
version: 1,
};
const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
remoteOverlay);
assert.equal(isRemoteOverlayNewer, false);
});
it('should return remoteOverlayIsNewer equals false if remote overlay ' +
'and the cached one are equal', () => {
const cachedOverlay = {
version: 1,
};
const remoteOverlay = {
version: 1,
};
const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
remoteOverlay);
assert.equal(isRemoteOverlayNewer, false);
});
it('should return remoteOverlayIsNewer equals true if remote overlay ' +
'version is greater than the cached one ', () => {
const cachedOverlay = {
version: 0,
};
const remoteOverlay = {
version: 1,
};
const isRemoteOverlayNewer = remoteOverlayIsNewer(cachedOverlay,
remoteOverlay);
assert.equal(isRemoteOverlayNewer, true);
});
});
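As the three cases show, the comparison reduces to a strict greater-than on the overlay version; a one-line sketch:

function remoteOverlayIsNewerSketch(cachedOverlay, remoteOverlay) {
    // strictly newer only: an equal version is not re-applied
    return (remoteOverlay.version || 0) > (cachedOverlay.version || 0);
}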

View File

@ -0,0 +1,9 @@
{
"privateKey": "-----BEGIN RSA PRIVATE KEY-----\r\nMIIEowIBAAKCAQEAj13sSYE40lAX2qpBvfdGfcSVNtBf8i5FH+E8FAhORwwPu+2S\r\n3yBQbgwHq30WWxunGb1NmZL1wkVZ+vf12DtxqFRnMA08LfO4oO6oC4V8XfKeuHyJ\r\n1qlaKRINz6r9yDkTHtwWoBnlAINurlcNKgGD5p7D+G26Chbr/Oo0ZwHula9DxXy6\r\neH8/bJ5/BynyNyyWRPoAO+UkUdY5utkFCUq2dbBIhovMgjjikf5p2oWqnRKXc+JK\r\nBegr6lSHkkhyqNhTmd8+wA+8Cace4sy1ajY1t5V4wfRZea5vwl/HlyyKodvHdxng\r\nJgg6H61JMYPkplY6Gr9OryBKEAgq02zYoYTDfwIDAQABAoIBAAuDYGlavkRteCzw\r\nRU1LIVcSRWVcgIgDXTu9K8T0Ec0008Kkxomyn6LmxmroJbZ1VwsDH8s4eRH73ckA\r\nxrZxt6Pr+0lplq6eBvKtl8MtGhq1VDe+kJczjHEF6SQHOFAu/TEaPZrn2XMcGvRX\r\nO1BnRL9tepFlxm3u/06VRFYNWqqchM+tFyzLu2AuiuKd5+slSX7KZvVgdkY1ErKH\r\ngB75lPyhPb77C/6ptqUisVMSO4JhLhsD0+ekDVY982Sb7KkI+szdWSbtMx9Ek2Wo\r\ntXwJz7I8T7IbODy9aW9G+ydyhMDFmaEYIaDVFKJj5+fluNza3oQ5PtFNVE50GQJA\r\nsisGqfECgYEAwpkwt0KpSamSEH6qknNYPOwxgEuXWoFVzibko7is2tFPvY+YJowb\r\n68MqHIYhf7gHLq2dc5Jg1TTbGqLECjVxp4xLU4c95KBy1J9CPAcuH4xQLDXmeLzP\r\nJ2YgznRocbzAMCDAwafCr3uY9FM7oGDHAi5bE5W11xWx+9MlFExL3JkCgYEAvJp5\r\nf+JGN1W037bQe2QLYUWGszewZsvplnNOeytGQa57w4YdF42lPhMz6Kc/zdzKZpN9\r\njrshiIDhAD5NCno6dwqafBAW9WZl0sn7EnlLhD4Lwm8E9bRHnC9H82yFuqmNrzww\r\nzxBCQogJISwHiVz4EkU48B283ecBn0wT/fAa19cCgYEApKWsnEHgrhy1IxOpCoRh\r\nUhqdv2k1xDPN/8DUjtnAFtwmVcLa/zJopU/Zn4y1ZzSzjwECSTi+iWZRQ/YXXHPf\r\nl92SFjhFW92Niuy8w8FnevXjF6T7PYiy1SkJ9OR1QlZrXc04iiGBDazLu115A7ce\r\nanACS03OLw+CKgl6Q/RR83ECgYBCUngDVoimkMcIHHt3yJiP3ikeAKlRnMdJlsa0\r\nXWVZV4hCG3lDfRXsnEgWuimftNKf+6GdfYSvQdLdiQsCcjT5A4uLsQTByv5nf4uA\r\n1ZKOsFrmRrARzxGXhLDikvj7yP//7USkq+0BBGFhfuAvl7fMhPceyPZPehqB7/jf\r\nxX1LBQKBgAn5GgSXzzS0e06ZlP/VrKxreOHa5Z8wOmqqYQ0QTeczAbNNmuITdwwB\r\nNkbRqpVXRIfuj0BQBegAiix8om1W4it0cwz54IXBwQULxJR1StWxj3jo4QtpMQ+z\r\npVPdB1Ilb9zPV1YvDwRfdS1xsobzznAx56ecsXduZjs9mF61db8Q\r\n-----END RSA PRIVATE KEY-----\r\n",
"publicKey": "-----BEGIN PUBLIC KEY-----\r\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAj13sSYE40lAX2qpBvfdG\r\nfcSVNtBf8i5FH+E8FAhORwwPu+2S3yBQbgwHq30WWxunGb1NmZL1wkVZ+vf12Dtx\r\nqFRnMA08LfO4oO6oC4V8XfKeuHyJ1qlaKRINz6r9yDkTHtwWoBnlAINurlcNKgGD\r\n5p7D+G26Chbr/Oo0ZwHula9DxXy6eH8/bJ5/BynyNyyWRPoAO+UkUdY5utkFCUq2\r\ndbBIhovMgjjikf5p2oWqnRKXc+JKBegr6lSHkkhyqNhTmd8+wA+8Cace4sy1ajY1\r\nt5V4wfRZea5vwl/HlyyKodvHdxngJgg6H61JMYPkplY6Gr9OryBKEAgq02zYoYTD\r\nfwIDAQAB\r\n-----END PUBLIC KEY-----\r\n",
"accessKey": "QXP3VDG3SALNBX2QBJ1C",
"secretKey": "K5FyqZo5uFKfw9QBtn95o6vuPuD0zH/1seIrqPKqGnz8AxALNSx6EeRq7G1I6JJpS1XN13EhnwGn2ipsml3Uf2fQ00YgEmImG8wzGVZm8fWotpVO4ilN4JGyQCah81rNX4wZ9xHqDD7qYR5MyIERxR/osoXfctOwY7GGUjRKJfLOguNUlpaovejg6mZfTvYAiDF+PTO1sKUYqHt1IfKQtsK3dov1EFMBB5pWM7sVfncq/CthKN5M+VHx9Y87qdoP3+7AW+RCBbSDOfQgxvqtS7PIAf10mDl8k2kEURLz+RqChu4O4S0UzbEmtja7wa7WYhYKv/tM/QeW7kyNJMmnPg==",
"decryptedSecretKey": "n7PSZ3U6SgerF9PCNhXYsq3S3fRKVGdZTicGV8Ur",
"canonicalId": "79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be",
"userName": "orbituser"
}

View File

@ -0,0 +1,43 @@
const assert = require('assert');
const { getCapabilities } = require('../../../lib/utilities/reportHandler');
// Ensures that expected features are enabled even if they
// rely on optional dependencies (such as secureChannelOptimizedPath)
describe('report handler', () => {
it('should report current capabilities', () => {
const c = getCapabilities();
assert.strictEqual(c.locationTypeDigitalOcean, true);
assert.strictEqual(c.locationTypeS3Custom, true);
assert.strictEqual(c.locationTypeSproxyd, true);
assert.strictEqual(c.locationTypeHyperdriveV2, true);
assert.strictEqual(c.locationTypeLocal, true);
assert.strictEqual(c.preferredReadLocation, true);
assert.strictEqual(c.managedLifecycle, true);
assert.strictEqual(c.secureChannelOptimizedPath, true);
assert.strictEqual(c.s3cIngestLocation, true);
});
[
{ value: 'true', result: true },
{ value: 'TRUE', result: true },
{ value: 'tRuE', result: true },
{ value: '1', result: true },
{ value: 'false', result: false },
{ value: 'FALSE', result: false },
{ value: 'FaLsE', result: false },
{ value: '0', result: false },
{ value: 'foo', result: false },
{ value: '', result: true },
{ value: undefined, result: true },
].forEach(param =>
it(`should allow set local file system capability ${param.value}`, () => {
const OLD_ENV = { ...process.env };
if (param.value !== undefined) process.env.LOCAL_VOLUME_CAPABILITY = param.value;
assert.strictEqual(getCapabilities().locationTypeLocal, param.result);
process.env = OLD_ENV;
})
);
});
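The truth table above maps LOCAL_VOLUME_CAPABILITY onto a boolean with "enabled" as the default. One way to reproduce it (a sketch, not the reportHandler internals):

function parseCapabilityFlagSketch(value) {
    if (value === undefined || value === '') {
        return true; // the capability defaults to enabled
    }
    return value.toLowerCase() === 'true' || value === '1';
}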

View File

@ -0,0 +1,72 @@
const assert = require('assert');
const {
ChannelMessageV0,
MessageType,
TargetType,
} = require('../../../lib/management/ChannelMessageV0');
const {
CONFIG_OVERLAY_MESSAGE,
METRICS_REQUEST_MESSAGE,
METRICS_REPORT_MESSAGE,
CHANNEL_CLOSE_MESSAGE,
CHANNEL_PAYLOAD_MESSAGE,
} = MessageType;
const { TARGET_ANY } = TargetType;
describe('ChannelMessageV0', () => {
describe('codec', () => {
it('should roundtrip metrics report', () => {
const b = ChannelMessageV0.encodeMetricsReportMessage({ a: 1 });
const m = new ChannelMessageV0(b);
assert.strictEqual(METRICS_REPORT_MESSAGE, m.getType());
assert.strictEqual(0, m.getChannelNumber());
assert.strictEqual(m.getTarget(), TARGET_ANY);
assert.strictEqual(m.getPayload().toString(), '{"a":1}');
});
it('should roundtrip channel data', () => {
const data = new Buffer('dummydata');
const b = ChannelMessageV0.encodeChannelDataMessage(50, data);
const m = new ChannelMessageV0(b);
assert.strictEqual(CHANNEL_PAYLOAD_MESSAGE, m.getType());
assert.strictEqual(50, m.getChannelNumber());
assert.strictEqual(m.getTarget(), TARGET_ANY);
assert.strictEqual(m.getPayload().toString(), 'dummydata');
});
it('should roundtrip channel close', () => {
const b = ChannelMessageV0.encodeChannelCloseMessage(3);
const m = new ChannelMessageV0(b);
assert.strictEqual(CHANNEL_CLOSE_MESSAGE, m.getType());
assert.strictEqual(3, m.getChannelNumber());
assert.strictEqual(m.getTarget(), TARGET_ANY);
});
});
describe('decoder', () => {
it('should parse metrics request', () => {
const b = new Buffer([METRICS_REQUEST_MESSAGE, 0, 0]);
const m = new ChannelMessageV0(b);
assert.strictEqual(METRICS_REQUEST_MESSAGE, m.getType());
assert.strictEqual(0, m.getChannelNumber());
assert.strictEqual(m.getTarget(), TARGET_ANY);
});
it('should parse overlay push', () => {
const b = new Buffer([CONFIG_OVERLAY_MESSAGE, 0, 0, 34, 65, 34]);
const m = new ChannelMessageV0(b);
assert.strictEqual(CONFIG_OVERLAY_MESSAGE, m.getType());
assert.strictEqual(0, m.getChannelNumber());
assert.strictEqual(m.getTarget(), TARGET_ANY);
assert.strictEqual(m.getPayload().toString(), '"A"');
});
});
});
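Both the codec and decoder cases imply a fixed three-byte header followed by the payload. A decoding sketch of that layout (field names assumed):

function decodeChannelMessageSketch(buf) {
    return {
        type: buf[0],          // message type, e.g. METRICS_REQUEST_MESSAGE
        channelNumber: buf[1], // 0 for control messages
        target: buf[2],        // TARGET_ANY is 0 in these tests
        payload: buf.slice(3), // e.g. bytes [34, 65, 34] decode to '"A"'
    };
}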

View File

@ -14,11 +14,7 @@ const bucket = new BucketInfo('niftyBucket', ownerCanonicalId,
authInfo.getAccountDisplayName(), creationDate);
const log = new DummyRequestLogger();
const {
validateBucket,
metadataGetObjects,
metadataGetObject,
} = require('../../../lib/metadata/metadataUtils');
const { validateBucket, metadataGetObjects, metadataGetObject } = require('../../../lib/metadata/metadataUtils');
const metadata = require('../../../lib/metadata/wrapper');
describe('validateBucket', () => {

View File

@ -1,13 +1,13 @@
const assert = require('assert');
const sinon = require('sinon');
const { ScubaClientImpl } = require('../../../../lib/quotas/scuba/wrapper');
const { ScubaClientImpl } = require('../../../lib/scuba/wrapper');
describe('ScubaClientImpl', () => {
let client;
let log;
beforeEach(() => {
client = new ScubaClientImpl({ scuba: true, quota: { maxStaleness: 24 * 60 * 60 * 1000 } });
client = new ScubaClientImpl({ scuba: true });
log = {
info: sinon.spy(),
warn: sinon.spy(),
@ -27,7 +27,7 @@ describe('ScubaClientImpl', () => {
});
it('should not enable Scuba if config.scuba is falsy', () => {
client = new ScubaClientImpl({ scuba: false, quota: { maxStaleness: 24 * 60 * 60 * 1000 } });
client = new ScubaClientImpl({ scuba: false });
client.setup(log);
assert.strictEqual(client.enabled, false);
@ -43,22 +43,6 @@ describe('ScubaClientImpl', () => {
assert.strictEqual(client.enabled, true);
});
it('should disable Scuba if health check returns non-stale data', async () => {
sinon.stub(client, 'healthCheck').resolves({ date: Date.now() - (12 * 60 * 60 * 1000) });
await client._healthCheck();
assert.strictEqual(client.enabled, true);
});
it('should disable Scuba if health check returns stale data', async () => {
sinon.stub(client, 'healthCheck').resolves({ date: Date.now() - (48 * 60 * 60 * 1000) });
await client._healthCheck();
assert.strictEqual(client.enabled, false);
});
it('should disable Scuba if health check fails', async () => {
const error = new Error('Health check failed');
sinon.stub(client, 'healthCheck').rejects(error);

View File

@ -104,9 +104,6 @@
"host": "localhost",
"port": 8100
},
"quota": {
"maxStalenessMS": 86400000
},
"utapi": {
"redis": {
"host": "localhost",

View File

@ -1,254 +0,0 @@
const assert = require('assert');
const { parseRedisConfig } = require('../../../lib/Config');
describe('parseRedisConfig', () => {
[
{
desc: 'with host and port',
input: {
host: 'localhost',
port: 6479,
},
},
{
desc: 'with host, port and password',
input: {
host: 'localhost',
port: 6479,
password: 'mypass',
},
},
{
desc: 'with host, port and an empty password',
input: {
host: 'localhost',
port: 6479,
password: '',
},
},
{
desc: 'with host, port and an empty retry config',
input: {
host: 'localhost',
port: 6479,
retry: {
},
},
},
{
desc: 'with host, port and a custom retry config',
input: {
host: 'localhost',
port: 6479,
retry: {
connectBackoff: {
min: 10,
max: 1000,
jitter: 0.1,
factor: 1.5,
deadline: 10000,
},
},
},
},
{
desc: 'with a single sentinel and no sentinel password',
input: {
name: 'myname',
sentinels: [
{
host: 'localhost',
port: 16479,
},
],
},
},
{
desc: 'with two sentinels and a sentinel password',
input: {
name: 'myname',
sentinels: [
{
host: '10.20.30.40',
port: 16479,
},
{
host: '10.20.30.41',
port: 16479,
},
],
sentinelPassword: 'mypass',
},
},
{
desc: 'with a sentinel and an empty sentinel password',
input: {
name: 'myname',
sentinels: [
{
host: '10.20.30.40',
port: 16479,
},
],
sentinelPassword: '',
},
},
{
desc: 'with a basic production-like config with sentinels',
input: {
name: 'scality-s3',
password: '',
sentinelPassword: '',
sentinels: [
{
host: 'storage-1',
port: 16379,
},
{
host: 'storage-2',
port: 16379,
},
{
host: 'storage-3',
port: 16379,
},
{
host: 'storage-4',
port: 16379,
},
{
host: 'storage-5',
port: 16379,
},
],
},
},
{
desc: 'with a single sentinel passed as a string',
input: {
name: 'myname',
sentinels: '10.20.30.40:16479',
},
output: {
name: 'myname',
sentinels: [
{
host: '10.20.30.40',
port: 16479,
},
],
},
},
{
desc: 'with a list of sentinels passed as a string',
input: {
name: 'myname',
sentinels: '10.20.30.40:16479,another-host:16480,10.20.30.42:16481',
sentinelPassword: 'mypass',
},
output: {
name: 'myname',
sentinels: [
{
host: '10.20.30.40',
port: 16479,
},
{
host: 'another-host',
port: 16480,
},
{
host: '10.20.30.42',
port: 16481,
},
],
sentinelPassword: 'mypass',
},
},
].forEach(testCase => {
it(`should parse a valid config ${testCase.desc}`, () => {
const redisConfig = parseRedisConfig(testCase.input);
assert.deepStrictEqual(redisConfig, testCase.output || testCase.input);
});
});
[
{
desc: 'that is empty',
input: {},
},
{
desc: 'with only a host',
input: {
host: 'localhost',
},
},
{
desc: 'with only a port',
input: {
port: 6479,
},
},
{
desc: 'with a custom retry config with missing values',
input: {
host: 'localhost',
port: 6479,
retry: {
connectBackoff: {
},
},
},
},
{
desc: 'with a sentinel but no name',
input: {
sentinels: [
{
host: 'localhost',
port: 16479,
},
],
},
},
{
desc: 'with a sentinel but an empty name',
input: {
name: '',
sentinels: [
{
host: 'localhost',
port: 16479,
},
],
},
},
{
desc: 'with an empty list of sentinels',
input: {
name: 'myname',
sentinels: [],
},
},
{
desc: 'with an empty list of sentinels passed as a string',
input: {
name: 'myname',
sentinels: '',
},
},
{
desc: 'with an invalid list of sentinels passed as a string (missing port)',
input: {
name: 'myname',
sentinels: '10.20.30.40:16479,10.20.30.50',
},
},
].forEach(testCase => {
it(`should fail to parse an invalid config ${testCase.desc}`, () => {
assert.throws(() => {
parseRedisConfig(testCase.input);
});
});
});
});
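The string form of sentinels accepted above is a comma-separated host:port list; a sketch of that part of the parsing, with validation reduced to what the failing cases require:

function parseSentinelsStringSketch(sentinels) {
    return sentinels.split(',').map(entry => {
        const [host, port] = entry.split(':');
        if (!host || !port) {
            // covers '' and entries with a missing port such as '10.20.30.50'
            throw new Error(`invalid sentinel "${entry}"`);
        }
        return { host, port: Number(port) };
    });
}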

View File

@ -1,6 +1,5 @@
const { errors } = require('arsenal');
const express = require('express');
const { config } = require('../../../lib/Config');
const inflightFlushFrequencyMS = 200;
@ -12,7 +11,6 @@ class Scuba {
bucket: new Map(),
};
this._app = express();
this.supportsInflight = config.isQuotaInflightEnabled();
}
_initiateRoutes() {
@ -27,13 +25,7 @@ class Scuba {
});
this._app.post('/metrics/bucket/:bucket/latest', (req, res) => {
let bucketName = req.params.bucket;
if (!this.supportsInflight) {
bucketName = req.params.bucket?.split('_')[0];
return res.status(200).json({
bytesTotal: this._data.bucket.get(bucketName)?.current || 0,
});
}
const bucketName = req.params.bucket;
const inflight = Number(req.body?.inflight) || 0;
this._updateData({
action: req.body?.action,
@ -41,7 +33,7 @@ class Scuba {
inflight,
});
const immediateInflights = req.body?.action === 'objectRestore' ? 0 : inflight;
return res.json({
res.json({
bytesTotal: (this._data.bucket.get(bucketName)?.current || 0) +
(this._data.bucket.get(bucketName)?.nonCurrent || 0) +
(this._data.bucket.get(bucketName)?.inflight || 0) +
@ -59,7 +51,7 @@ class Scuba {
if (!this._data.bucket.get(bucket)) {
this._data.bucket.set(bucket, { current: 0, nonCurrent: 0, inflight: 0 });
}
if (timeout && this.supportsInflight) {
if (timeout) {
setTimeout(() => {
if (this._data.bucket.get(bucket)) {
this._data.bucket.set(bucket, {
@ -98,23 +90,12 @@ class Scuba {
getInflightsForBucket(bucketName) {
let inflightCount = 0;
this._data.bucket.forEach((value, key) => {
if (!this.supportsInflight && key === bucketName) {
inflightCount += (value.current + value.nonCurrent);
} else if (this.supportsInflight && key.startsWith(`${bucketName}_`)) {
inflightCount += value.inflight;
if (key.startsWith(`${bucketName}_`)) {
inflightCount += value.inflight;
}
});
return inflightCount;
}
incrementBytesForBucket(bucketName, bytes) {
if (!this._data.bucket.has(bucketName)) {
this._data.bucket.set(bucketName, { current: 0, nonCurrent: 0, inflight: 0 });
}
const bucket = this._data.bucket.get(bucketName);
bucket.current += bytes;
this._data.bucket.set(bucketName, bucket);
}
}
module.exports = {

View File

@ -1,48 +0,0 @@
module.exports = {
entry: './index.js',
target: 'node',
devtool: 'source-map',
output: {
filename: 'zenko-vitastor.js'
},
module: {
rules: [
{
test: /.jsx?$/,
use: {
loader: 'babel-loader',
options: {
presets: [ [ "@babel/preset-env", { "targets": { "node": "12.0" }, "exclude": [ "transform-regenerator" ] } ] ],
}
}
},
{
test: /.json$/,
type: 'json'
}
]
},
externals: {
'leveldown': 'commonjs leveldown',
'bufferutil': 'commonjs bufferutil',
'diskusage': 'commonjs diskusage',
'utf-8-validate': 'commonjs utf-8-validate',
'fcntl': 'commonjs fcntl',
'ioctl': 'commonjs ioctl',
'vitastor': 'commonjs vitastor',
'vaultclient': 'commonjs vaultclient',
'bucketclient': 'commonjs bucketclient',
'scality-kms': 'commonjs scality-kms',
'sproxydclient': 'commonjs sproxydclient',
'hdclient': 'commonjs hdclient',
'cdmiclient': 'commonjs cdmiclient',
'kerberos': 'commonjs kerberos',
'@mongodb-js/zstd': 'commonjs @mongodb-js/zstd',
'@aws-sdk/credential-providers': 'commonjs @aws-sdk/credential-providers',
'snappy': 'commonjs snappy',
'mongodb-client-encryption': 'commonjs mongodb-client-encryption'
},
node: {
__dirname: false
}
};

yarn.lock (6364 lines): diff suppressed because it is too large.